Compare commits


2 Commits

Author      SHA1        Message                                            Date
Jade Ellis  9d0c89bd04  fix(v12): Create tombstone event on room upgrade   2025-09-24 18:22:16 +00:00
nexy7574    965db4aa43  fix: V12 room upgrades                             2025-09-24 18:22:16 +00:00
84 changed files with 2420 additions and 2391 deletions

View File

@@ -40,12 +40,6 @@ creds:
- registry: registry.gitlab.com
user: "{{env \"GITLAB_USERNAME\"}}"
pass: "{{env \"GITLAB_TOKEN\"}}"
- registry: git.nexy7574.co.uk
user: "{{env \"N7574_GIT_USERNAME\"}}"
pass: "{{env \"N7574_GIT_TOKEN\"}}"
- registry: ghcr.io
user: "{{env \"GH_PACKAGES_USER\"}}"
pass: "{{env \"GH_PACKAGES_TOKEN\"}}"
# Global defaults
defaults:
@@ -59,11 +53,3 @@ sync:
target: registry.gitlab.com/continuwuity/continuwuity
type: repository
<<: *tags-main
- source: *source
target: git.nexy7574.co.uk/mirrored/continuwuity
type: repository
<<: *tags-releases
- source: *source
target: ghcr.io/continuwuity/continuwuity
type: repository
<<: *tags-main

View File

@@ -32,7 +32,7 @@ jobs:
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
uses: https://code.forgejo.org/actions/checkout@v4
with:
fetch-depth: 0
@@ -132,7 +132,7 @@ jobs:
path: ${{ steps.cargo-deb.outputs.path }}
- name: Publish to Forgejo package registry
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' || forge.event_name == 'schedule' }}
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' }}
run: |
OWNER="continuwuation"
DISTRIBUTION=${{ steps.debian-version.outputs.distribution }}

View File

@@ -30,7 +30,7 @@ jobs:
echo "Fedora version: $VERSION"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
uses: https://code.forgejo.org/actions/checkout@v4
with:
fetch-depth: 0
@@ -250,7 +250,7 @@ jobs:
path: artifacts/*debuginfo*.rpm
- name: Publish to RPM Package Registry
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }}
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find artifacts -name "continuwuity-*.rpm" \

View File

@@ -55,7 +55,7 @@ jobs:
- name: Setup Node.js
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: 22

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: "22"

View File

@@ -11,13 +11,7 @@ on:
required: false
default: false
type: boolean
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/regsync/regsync.yml'
- '.forgejo/workflows/mirror-images.yml'
concurrency:
group: "mirror-images"
cancel-in-progress: true
@@ -30,25 +24,12 @@ jobs:
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
N7574_GIT_USERNAME: ${{ vars.N7574_GIT_USERNAME }}
N7574_GIT_TOKEN: ${{ secrets.N7574_GIT_TOKEN }}
GH_PACKAGES_USER: ${{ vars.GH_PACKAGES_USER }}
GH_PACKAGES_TOKEN: ${{ secrets.GH_PACKAGES_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
# - uses: https://github.com/actions/create-github-app-token@v2
# id: app-token
# with:
# app-id: ${{ vars.GH_APP_ID }}
# private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
# github-api-url: https://api.github.com
# owner: continuwuity
# repositories: continuwuity
- name: Install regctl
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
with:

View File

@@ -43,7 +43,7 @@ jobs:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:41.146.4@sha256:bb70194b7405faf10a6f279b60caa10403a440ba37d158c5a4ef0ae7b67a0f92
image: ghcr.io/renovatebot/renovate:41.127.2@sha256:66bc84e2f889025fbb3c9df863500dcc18bc64ac85bcf629d015064377d77f31
options: --tmpfs /tmp:exec
steps:
- name: Checkout

View File

@@ -7,8 +7,6 @@ on:
- "Cargo.lock"
- "Cargo.toml"
- "rust-toolchain.toml"
- "nix/**/*"
- ".forgejo/workflows/update-flake-hashes.yml"
jobs:
update-flake-hashes:
@@ -16,13 +14,13 @@ jobs:
steps:
- uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
fetch-depth: 1
fetch-tags: false
fetch-single-branch: true
submodules: false
persist-credentials: false
- uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
- uses: https://github.com/cachix/install-nix-action@a809471b5c7c913aa67bec8f459a11a0decc3fce # v31.6.2
with:
nix_path: nixpkgs=channel:nixos-unstable
@@ -41,22 +39,13 @@ jobs:
echo "Base: $base"
echo "HEAD: $(git rev-parse HEAD)"
git diff --name-only $base HEAD > changed_files.txt
echo "detected changes in $(cat changed_files.txt)"
# Join files with commas
files=$(paste -sd, changed_files.txt)
echo "files=$files" >> $FORGEJO_OUTPUT
- name: Debug output
run: |
echo "State of output"
echo "Changed files: ${{ steps.changes.outputs.files }}"
echo "files=$(cat changed_files.txt)" >> $FORGEJO_OUTPUT
- name: Get new toolchain hash
if: contains(steps.changes.outputs.files, 'Cargo.toml') || contains(steps.changes.outputs.files, 'Cargo.lock') || contains(steps.changes.outputs.files, 'rust-toolchain.toml')
run: |
# Set the current sha256 to an empty hash to make `nix build` calculate a new one
awk '/fromToolchainFile *\{/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rust.nix > temp.nix
mv temp.nix nix/packages/rust.nix
awk '/fromToolchainFile *\{/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = pkgsHost.lib.fakeSha256;"); found=0} 1' flake.nix > temp.nix && mv temp.nix flake.nix
# Build continuwuity and filter for the new hash
# We do `|| true` because we want this to fail without stopping the workflow
@@ -64,21 +53,19 @@ jobs:
# Place the new hash in place of the empty hash
new_hash=$(cat new_toolchain_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rust.nix
sed -i "s|pkgsHost.lib.fakeSha256|\"$new_hash\"|" flake.nix
echo "New hash:"
awk -F'"' '/fromToolchainFile/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rust.nix
awk -F'"' '/fromToolchainFile/{found=1; next} found && /sha256 =/{print $2; found=0}' flake.nix
echo "Expected new hash:"
cat new_toolchain_hash.txt
rm new_toolchain_hash.txt
- name: Get new rocksdb hash
if: contains(steps.changes.outputs.files, '.nix') || contains(steps.changes.outputs.files, 'flake.lock')
run: |
# Set the current sha256 to an empty hash to make `nix build` calculate a new one
awk '/repo = "rocksdb";/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rocksdb/package.nix > temp.nix
mv temp.nix nix/packages/rocksdb/package.nix
awk '/repo = "rocksdb";/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = pkgsHost.lib.fakeSha256;"); found=0} 1' flake.nix > temp.nix && mv temp.nix flake.nix
# Build continuwuity and filter for the new hash
# We do `|| true` because we want this to fail without stopping the workflow
@@ -86,17 +73,17 @@ jobs:
# Place the new hash in place of the empty hash
new_hash=$(cat new_rocksdb_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rocksdb/package.nix
sed -i "s|pkgsHost.lib.fakeSha256|\"$new_hash\"|" flake.nix
echo "New hash:"
awk -F'"' '/repo = "rocksdb";/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rocksdb/package.nix
awk -F'"' '/repo = "rocksdb";/{found=1; next} found && /sha256 =/{print $2; found=0}' flake.nix
echo "Expected new hash:"
cat new_rocksdb_hash.txt
rm new_rocksdb_hash.txt
- name: Show diff
run: git diff flake.nix nix
run: git diff flake.nix
- name: Push changes
run: |

View File

@@ -7,7 +7,7 @@ default_stages:
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
rev: v5.0.0
hooks:
- id: fix-byte-order-marker
- id: check-case-conflict
@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.39.0
rev: v1.26.0
hooks:
- id: typos
- id: typos

Cargo.lock (generated, 1639 changed lines)

File diff suppressed because it is too large.

View File

@@ -166,8 +166,8 @@ default-features = false
features = ["raw_value"]
# Used for appservice registration files
[workspace.dependencies.serde-saphyr]
version = "0.0.7"
[workspace.dependencies.serde_yml]
version = "0.0.12"
# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -210,13 +210,13 @@ default-features = false
version = "0.1.41"
default-features = false
[workspace.dependencies.tracing-subscriber]
version = "0.3.20"
version = "0.3.19"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-journald]
version = "0.3.1"
[workspace.dependencies.tracing-core]
version = "0.1.34"
version = "0.1.33"
default-features = false
# for URL previews
@@ -286,7 +286,7 @@ features = [
]
[workspace.dependencies.hyper-util]
version = "=0.1.17"
version = "0.1.11"
default-features = false
features = [
"server-auto",
@@ -351,7 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
rev = "50b2a91b2ab8f9830eea80b9911e11234e0eac66"
rev = "d18823471ab3c09e77ff03eea346d4c07e572654"
features = [
"compat",
"rand",
@@ -412,27 +412,28 @@ default-features = false
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.31.0"
version = "0.30.0"
[workspace.dependencies.tracing-flame]
version = "0.2.0"
[workspace.dependencies.tracing-opentelemetry]
version = "0.32.0"
version = "0.31.0"
[workspace.dependencies.opentelemetry_sdk]
version = "0.31.0"
version = "0.30.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.31.0"
version = "0.30.0"
features = ["http", "trace", "logs", "metrics"]
[workspace.dependencies.opentelemetry-jaeger-propagator]
version = "0.30.0"
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.45.0"
version = "0.42.0"
default-features = false
features = [
"backtrace",
@@ -448,9 +449,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.45.0"
version = "0.42.0"
[workspace.dependencies.sentry-tower]
version = "0.45.0"
version = "0.42.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -476,7 +477,7 @@ default-features = false
features = ["use_std"]
[workspace.dependencies.console-subscriber]
version = "0.5"
version = "0.4"
[workspace.dependencies.nix]
version = "0.30.1"
@@ -550,9 +551,9 @@ features = ["std"]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.12.0"
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls", "rustls-provider"]
features = ["sync", "tls-rustls"]
[workspace.dependencies.resolv-conf]
version = "0.7.5"
@@ -563,7 +564,19 @@ version = "0.7.5"
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
[patch.crates-io.tracing-subscriber]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-core]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-log]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch
@@ -587,7 +600,7 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da"
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
[patch.crates-io.hyper-util]
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
rev = "5886d5292bf704c246206ad72d010d674a7b77d0"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
#
# Our crates
@@ -947,7 +960,7 @@ semicolon_outside_block = "warn"
str_to_string = "warn"
string_lit_chars_any = "warn"
string_slice = "warn"
string_to_string = "warn"
suspicious_xor_used_as_pow = "warn"
tests_outside_test_module = "warn"
try_err = "warn"

View File

@@ -957,21 +957,6 @@
#
#rocksdb_bottommost_compression = true
# Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
#
# At present, only ZSTD compression is supported by RocksDB for WAL
# compression. Enabling this can reduce WAL size at the expense of some
# CPU usage during writes.
#
# The options are:
# - "none" = No compression
# - "zstd" = ZSTD compression
#
# For more information on WAL compression, see:
# https://github.com/facebook/rocksdb/wiki/WAL-Compression
#
#rocksdb_wal_compression = "zstd"
# Database recovery mode (for RocksDB WAL corruption).
#
# Use this option when the server reports corruption and refuses to start.
@@ -1512,19 +1497,6 @@
#
#block_non_admin_invites = false
# Enable or disable making requests to MSC4284 Policy Servers.
# It is recommended you keep this enabled unless you experience frequent
# connectivity issues, such as in a restricted networking environment.
#
#enable_msc4284_policy_servers = true
# Enable running locally generated events through configured MSC4284
# policy servers. You may wish to disable this if your server is
# single-user for a slight speed benefit in some rooms, but otherwise
# should leave it enabled.
#
#policy_server_check_own_events = true
# Allow admins to enter commands in rooms other than "#admins" (admin
# room) by prefixing your message with "\!admin" or "\\!admin" followed up
# a normal continuwuity admin command. The reply will be publicly visible

View File

@@ -48,7 +48,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.10
ENV BINSTALL_VERSION=1.15.5
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.10
ENV BINSTALL_VERSION=1.15.5
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree

View File

@@ -1078,10 +1078,7 @@ ###### **Subcommands:**
* `delete` — - Deletes a single media file from our database and on the filesystem via a single MXC URL or event ID (not redacted)
* `delete-list` — - Deletes a codeblock list of MXC URLs from our database and on the filesystem. This will always ignore errors
* `delete-past-remote-media` — Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* `delete-past-remote-media` - Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
* `delete-all-from-user` — - Deletes all the local media from a local user on our server. This will always ignore errors by default
* `delete-all-from-server` — - Deletes all remote media from the specified remote server. This will always ignore errors by default
* `get-file-info`
@@ -1113,25 +1110,13 @@ ## `admin media delete-list`
## `admin media delete-past-remote-media`
Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* Examples:
* Delete all remote media older than a year:
`!admin media delete-past-remote-media -b 1y`
* Delete all remote and local media from 3 days ago, up until now:
`!admin media delete-past-remote-media -a 3d --yes-i-want-to-delete-local-media`
- Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
**Usage:** `admin media delete-past-remote-media [OPTIONS] <DURATION>`
###### **Arguments:**
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) from now within which to search
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) within which to search
###### **Options:**

View File

@@ -241,7 +241,7 @@ ## Documentation
### Code Comments
- Reference related documentation or parts of the specification
- When a task has multiple ways of being achieved, explain your reasoning for your decision
- When a task has multiple ways of being acheved, explain your reasoning for your decision
- Update comments when code changes
```rs

flake.lock (generated, 451 changed lines)
View File

@@ -1,28 +1,94 @@
{
"nodes": {
"advisory-db": {
"flake": false,
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nix-github-actions": "nix-github-actions",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1761112158,
"narHash": "sha256-RIXu/7eyKpQHjsPuAUODO81I4ni8f+WYSb7K4mTG6+0=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "58f3aaec0e1776f4a900737be8cd7cb00972210d",
"lastModified": 1757683818,
"narHash": "sha256-q7q0pWT+wu5AUU1Qlbwq8Mqb+AzHKhaMCVUq/HNZfo8=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "7c5d79ad62cda340cb8c80c99b921b7b7ffacf69",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"cachix": {
"inputs": {
"devenv": "devenv",
"flake-compat": "flake-compat_2",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1756385612,
"narHash": "sha256-+NU5MMhuPHHRyvZZWNFG7zt+leRSPsJu1MwhOUzkPUk=",
"owner": "cachix",
"repo": "cachix",
"rev": "dc24688cd67518c3711d511fa369c0f5a131063a",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "master",
"repo": "cachix",
"type": "github"
}
},
"cachix_2": {
"inputs": {
"devenv": [
"cachix",
"devenv"
],
"flake-compat": [
"cachix",
"devenv"
],
"git-hooks": [
"cachix",
"devenv",
"git-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
},
"crane": {
"locked": {
"lastModified": 1760924934,
"narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=",
"lastModified": 1751562746,
"narHash": "sha256-smpugNIkmDeicNz301Ll1bD7nFOty97T79m4GUMUczA=",
"owner": "ipetkov",
"repo": "crane",
"rev": "c6b4d5308293d0d04fcfeee92705017537cad02f",
"rev": "aed2020fd3dc26e1e857d4107a5a67a33ab6c1fd",
"type": "github"
},
"original": {
@@ -31,6 +97,53 @@
"type": "github"
}
},
"crane_2": {
"locked": {
"lastModified": 1757183466,
"narHash": "sha256-kTdCCMuRE+/HNHES5JYsbRHmgtr+l9mOtf5dpcMppVc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "d599ae4847e7f87603e7082d73ca673aa93c916d",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix_2",
"flake-compat": [
"cachix",
"flake-compat"
],
"git-hooks": [
"cachix",
"git-hooks"
],
"nix": "nix",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1754404745,
"narHash": "sha256-BdbW/iTImczgcuATgQIa9sPGuYIBxVq2xqcvICsa2AQ=",
"owner": "cachix",
"repo": "devenv",
"rev": "6563b21105168f90394dfaf58284b078af2d7275",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
@@ -39,20 +152,53 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1761115517,
"narHash": "sha256-Fev/ag/c3Fp3JBwHfup3lpA5FlNXfkoshnQ7dssBgJ0=",
"lastModified": 1758004879,
"narHash": "sha256-kV7tQzcNbmo58wg2uE2MQ/etaTx+PxBMHeNrLP8vOgk=",
"owner": "nix-community",
"repo": "fenix",
"rev": "320433651636186ea32b387cff05d6bbfa30cea7",
"rev": "07e5ce53dd020e6b337fdddc934561bee0698fa2",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "main",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
@@ -71,14 +217,17 @@
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
"nixpkgs-lib": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1760948891,
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
"lastModified": 1751413152,
"narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
"rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
"type": "github"
},
"original": {
@@ -87,13 +236,214 @@
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"cachix",
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": [
"cachix",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1750779888,
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"cachix",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts_2",
"git-hooks-nix": [
"cachix",
"devenv",
"git-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-23-11": [
"cachix",
"devenv"
],
"nixpkgs-regression": [
"cachix",
"devenv"
]
},
"locked": {
"lastModified": 1752773918,
"narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=",
"owner": "cachix",
"repo": "nix",
"rev": "031c3cf42d2e9391eee373507d8c12e0f9606779",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "devenv-2.30",
"repo": "nix",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1757882181,
"narHash": "sha256-+cCxYIh2UNalTz364p+QYmWHs0P+6wDhiWR4jDIKQIU=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "59c44d1909c72441144b93cf0f054be7fe764de5",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "nix-filter",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1737420293,
"narHash": "sha256-F1G5ifvqTpJq7fdkT34e/Jy9VCyzd5XfJ9TO8fHhJWE=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "f4158fa080ef4503c8f4c820967d946c2af31ec9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1760878510,
"narHash": "sha256-K5Osef2qexezUfs0alLvZ7nQFTGS9DL2oTVsIXsqLgs=",
"lastModified": 1751949589,
"narHash": "sha256-mgFxAPLWw0Kq+C8P3dRrZrOYEQXOtKuYVlo9xvPntt8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5e2a59a5b1a82f89f2c7e598302a9cacebb72a67",
"rev": "9b008d60392981ad674e04016d25619281550a9d",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1751741127,
"narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "29e290002bfff26af1db6f64d070698019460302",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1754214453,
"narHash": "sha256-Q/I2xJn/j1wpkGhWkQnm20nShYnG7TI99foDBpXm1SY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5b09dc45f24cf32316283e62aec81ffee3c3e376",
"type": "github"
},
"original": {
@@ -103,40 +453,42 @@
"type": "github"
}
},
"nixpkgs-lib": {
"nixpkgs_3": {
"locked": {
"lastModified": 1754788789,
"narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "a73b9c743612e4244d865a2fdee11865283c04e6",
"lastModified": 1758029226,
"narHash": "sha256-TjqVmbpoCqWywY9xIZLTf6ANFvDCXdctCjoYuYPYdMI=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "08b8f92ac6354983f5382124fef6006cade4a1c1",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"crane": "crane",
"attic": "attic",
"cachix": "cachix",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs",
"treefmt-nix": "treefmt-nix"
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_3"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1761077270,
"narHash": "sha256-O1uTuvI/rUlubJ8AXKyzh1WSWV3qCZX0huTFUvWLN4E=",
"lastModified": 1757362324,
"narHash": "sha256-/PAhxheUq4WBrW5i/JHzcCqK5fGWwLKdH6/Lu1tyS18=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "39990a923c8bca38f5bd29dc4c96e20ee7808d5d",
"rev": "9edc9cbe5d8e832b5864e09854fa94861697d2fd",
"type": "github"
},
"original": {
@@ -146,23 +498,18 @@
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"systems": {
"locked": {
"lastModified": 1760945191,
"narHash": "sha256-ZRVs8UqikBa4Ki3X4KCnMBtBW0ux1DaT35tgsnB1jM4=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "f56b1934f5f8fcab8deb5d38d42fd692632b47c2",
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}

flake.nix (371 changed lines)
View File

@@ -1,46 +1,351 @@
{
description = "A nix flake for the continuwuity project";
inputs = {
# basics
flake-parts.url = "github:hercules-ci/flake-parts";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# for rust via nix
crane.url = "github:ipetkov/crane";
attic.url = "github:zhaofengli/attic?ref=main";
cachix.url = "github:cachix/cachix?ref=master";
crane = {
url = "github:ipetkov/crane?ref=master";
};
fenix = {
url = "github:nix-community/fenix";
url = "github:nix-community/fenix?ref=main";
inputs.nixpkgs.follows = "nixpkgs";
};
# for vuln checks
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
treefmt-nix = {
url = "github:numtide/treefmt-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
# for default.nix
flake-compat = {
url = "github:edolstra/flake-compat?ref=master";
flake = false;
};
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
};
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [ ./nix ];
systems = [
# good support
"x86_64-linux"
# support untested but theoretically there
"aarch64-linux"
];
};
inputs:
inputs.flake-utils.lib.eachDefaultSystem (
system:
let
pkgsHost = import inputs.nixpkgs {
inherit system;
};
fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./pkg/nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
"out"
"dev"
"man"
];
buildFlags = [ "library" ];
};
rocksdb =
(pkgs.rocksdb_9_10.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit (self) liburing;
}).overrideAttrs
(old: {
src = pkgsHost.fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.5.fb";
sha256 = "sha256-X4ApGLkHF9ceBtBg77dimEpu720I79ffLoyPa8JMHaU=";
};
version = "v10.5.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# We don't need to build rocksdb tests
"-DWITH_TESTS=1"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=1"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
] old.cmakeFlags
++ [
# No real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# We don't need trace tools
"-DWITH_TRACE_TOOLS=0"
# We don't need to build rocksdb tests
"-DWITH_TESTS=0"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting this so we don't have to revert it and make this nix exclusive
patches = [ ];
postPatch = ''
# Fix gcc-13 build failures due to missing <cstdint> and
# <system_error> includes, fixed upstream since 8.x
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
sed -e '1i #include <cstdint>' -i util/string_util.h
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
'';
});
});
scopeHost = mkScope pkgsHost;
mkCrossScope =
crossSystem:
let
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
in
{
packages =
{
default = scopeHost.main.override {
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
# Just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
}
// builtins.listToAttrs (
builtins.concatLists (
builtins.map
(
crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = [ "hardened_malloc" ];
};
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
}
);
}

View File

@@ -1,108 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
commonAttrs = (uwulib.build.commonAttrs { }) // {
buildInputs = [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
nativeBuildInputs = [
pkgs.pkg-config
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
env = {
LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
LD_LIBRARY_PATH = lib.makeLibraryPath [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
}
// uwulib.environment.buildPackageEnv
// {
ROCKSDB_INCLUDE_DIR = "${rocksdbAllFeatures}/include";
ROCKSDB_LIB_DIR = "${rocksdbAllFeatures}/lib";
};
};
cargoArtifacts = self'.packages.continuwuity-all-features-deps;
in
{
# taken from
#
# https://crane.dev/examples/quick-start.html
checks = {
continuwuity-all-features-build = self'.packages.continuwuity-all-features-bin;
continuwuity-all-features-clippy = uwulib.build.craneLibForChecks.cargoClippy (
commonAttrs
// {
inherit cargoArtifacts;
cargoClippyExtraArgs = "-- --deny warnings";
}
);
continuwuity-all-features-docs = uwulib.build.craneLibForChecks.cargoDoc (
commonAttrs
// {
inherit cargoArtifacts;
# This can be commented out or tweaked as necessary, e.g. set to
# `--deny rustdoc::broken-intra-doc-links` to only enforce that lint
env.RUSTDOCFLAGS = "--deny warnings";
}
);
# Check formatting
continuwuity-all-features-fmt = uwulib.build.craneLibForChecks.cargoFmt {
src = uwulib.build.src;
};
continuwuity-all-features-toml-fmt = uwulib.build.craneLibForChecks.taploFmt {
src = pkgs.lib.sources.sourceFilesBySuffices uwulib.build.src [ ".toml" ];
# taplo arguments can be further customized below as needed
taploExtraArgs = "--config ${inputs.self}/taplo.toml";
};
# Audit dependencies
continuwuity-all-features-audit = uwulib.build.craneLibForChecks.cargoAudit {
inherit (inputs) advisory-db;
src = uwulib.build.src;
};
# Audit licenses
continuwuity-all-features-deny = uwulib.build.craneLibForChecks.cargoDeny {
src = uwulib.build.src;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `continuwuity-all-features` if you do not want
# the tests to run twice
continuwuity-all-features-nextest = uwulib.build.craneLibForChecks.cargoNextest (
commonAttrs
// {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
cargoNextestPartitionsExtraArgs = "--no-tests=pass";
}
);
};
};
}

View File

@@ -1,11 +0,0 @@
{
imports = [
./checks
./packages
./shells
./tests
./hydra.nix
./fmt.nix
];
}

View File

@@ -1,37 +0,0 @@
{ inputs, ... }:
{
# load the flake module from upstream
imports = [ inputs.treefmt-nix.flakeModule ];
perSystem =
{ self', lib, ... }:
{
treefmt = {
# repo root as project root
projectRoot = inputs.self;
# the formatters
programs = {
nixfmt.enable = true;
typos = {
enable = true;
configFile = "${inputs.self}/.typos.toml";
};
taplo = {
enable = true;
settings = lib.importTOML "${inputs.self}/taplo.toml";
};
};
settings.formatter.rustfmt = {
command = "${lib.getExe' self'.packages.dev-toolchain "rustfmt"}";
includes = [ "**/*.rs" ];
options = [
"--unstable-features"
"--edition=2024"
"--config-path=${inputs.self}/rustfmt.toml"
];
};
};
};
}

View File

@@ -1,9 +0,0 @@
{ inputs, ... }:
let
lib = inputs.nixpkgs.lib;
in
{
flake.hydraJobs.packages = builtins.mapAttrs (
_name: lib.hydraJob
) inputs.self.packages.x86_64-linux;
}

View File

@@ -1,60 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
in
{
packages =
lib.pipe
[
# this is the default variant
{
variantName = "default";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb;
features = { };
}
# this is the variant with all features enabled (liburing + jemalloc)
{
variantName = "all-features";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
features = {
enabledFeatures = "all";
disabledFeatures = uwulib.features.defaultDisabledFeatures ++ [ "bindgen-static" ];
};
}
]
[
(builtins.map (cfg: rec {
deps = {
name = "continuwuity-${cfg.variantName}-deps";
value = uwulib.build.buildDeps {
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
bin = {
name = "continuwuity-${cfg.variantName}-bin";
value = uwulib.build.buildPackage {
deps = self'.packages.${deps.name};
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
}))
(builtins.concatMap builtins.attrValues)
builtins.listToAttrs
];
};
}

View File

@@ -1,14 +0,0 @@
{
imports = [
./continuwuity
./rocksdb
./rust.nix
./uwulib
];
perSystem =
{ self', ... }:
{
packages.default = self'.packages.continuwuity-default-bin;
};
}

View File

@@ -1,12 +0,0 @@
{
perSystem =
{
pkgs,
...
}:
{
packages = {
rocksdb = pkgs.callPackage ./package.nix { };
};
};
}

View File

@@ -1,88 +0,0 @@
{
lib,
stdenv,
rocksdb,
liburing,
rust-jemalloc-sys-unprefixed,
enableJemalloc ? false,
enableLiburing ? false,
fetchFromGitea,
...
}:
let
notDarwin = !stdenv.hostPlatform.isDarwin;
in
(rocksdb.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit liburing;
jemalloc = rust-jemalloc-sys-unprefixed;
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = enableJemalloc && notDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing && notDarwin;
}).overrideAttrs
(old: {
src = fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.5.fb";
sha256 = "sha256-X4ApGLkHF9ceBtBg77dimEpu720I79ffLoyPa8JMHaU=";
};
version = "10.5.fb";
cmakeFlags =
lib.subtractLists (builtins.map (flag: lib.cmakeBool flag true) [
# No real reason to have snappy or zlib, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"FORCE_SSE42"
]) old.cmakeFlags
++ (builtins.map (flag: lib.cmakeBool flag false) [
# No real reason to have snappy, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need trace tools
"WITH_TRACE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
]);
enableLiburing = enableLiburing && notDarwin;
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting `patches` so we don't have to revert it and make this nix exclusive
patches = [ ];
})

View File

@@ -1,32 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
system,
lib,
...
}:
{
packages =
let
fnx = inputs.fenix.packages.${system};
stable = fnx.fromToolchainFile {
file = inputs.self + "/rust-toolchain.toml";
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
};
in
{
# used for building nix stuff (doesn't include rustfmt overhead)
build-toolchain = stable;
# used for dev shells
dev-toolchain = fnx.combine [
stable
# use the nightly rustfmt because we use nightly features
fnx.complete.rustfmt
];
};
};
}

View File

@@ -1,108 +0,0 @@
args@{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
uwuenv = import ./environment.nix args;
selfpkgs = inputs.self.packages.${pkgs.stdenv.system};
in
rec {
# basic, very minimal instance of the crane library with a minimal rust toolchain
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.build-toolchain);
# the checks require more rust toolchain components, hence we have this separate instance of the crane library
craneLibForChecks = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.dev-toolchain);
# meta information (name, version, etc) of the rust crate based on the Cargo.toml
crateInfo = craneLib.crateNameFromCargoToml { cargoToml = "${inputs.self}/Cargo.toml"; };
src =
let
# see https://crane.dev/API.html#cranelibfiltercargosources
#
# we need to keep the `web` directory which would be filtered out by the regular source filtering function
#
# https://crane.dev/API.html#cranelibcleancargosource
isWebTemplate = path: _type: builtins.match ".*src/web.*" path != null;
isRust = craneLib.filterCargoSources;
isNix = path: _type: builtins.match ".+/nix.*" path != null;
webOrRustNotNix = p: t: !(isNix p t) && (isWebTemplate p t || isRust p t);
in
lib.cleanSourceWith {
src = inputs.self;
filter = webOrRustNotNix;
name = "source";
};
# common attrs that are shared between building continuwuity's deps and the package itself
commonAttrs =
{
profile ? "dev",
...
}:
{
inherit (crateInfo)
pname
version
;
inherit src;
# this prevents unnecessary rebuilds
strictDeps = true;
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
doCheck = true;
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
};
makeRocksDBEnv =
{ rocksdb }:
{
ROCKSDB_INCLUDE_DIR = "${rocksdb}/include";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
};
# function that builds the continuwuity dependencies derivation
buildDeps =
{
rocksdb,
features,
commonAttrsArgs,
}:
craneLib.buildDepsOnly (
(commonAttrs commonAttrsArgs)
// {
env = uwuenv.buildDepsOnlyEnv // (makeRocksDBEnv { inherit rocksdb; });
inherit (features) cargoExtraArgs;
}
);
# function that builds the continuwuity package
buildPackage =
{
deps,
rocksdb,
features,
commonAttrsArgs,
}:
let
rocksdbEnv = makeRocksDBEnv { inherit rocksdb; };
in
craneLib.buildPackage (
(commonAttrs commonAttrsArgs)
// {
cargoArtifacts = deps;
doCheck = true;
env = uwuenv.buildPackageEnv // rocksdbEnv;
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
meta.mainProgram = crateInfo.pname;
inherit (features) cargoExtraArgs;
}
);
}

View File

@@ -1,10 +0,0 @@
{ inputs, ... }:
{
flake.uwulib = {
init = pkgs: {
features = import ./features.nix { inherit pkgs inputs; };
environment = import ./environment.nix { inherit pkgs inputs; };
build = import ./build.nix { inherit pkgs inputs; };
};
};
}

View File

@@ -1,18 +0,0 @@
args@{ pkgs, inputs, ... }:
let
uwubuild = import ./build.nix args;
in
rec {
buildDepsOnlyEnv = {
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = "release";
}
// uwubuild.craneLib.mkCrossToolchainEnv (p: pkgs.clangStdenv);
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
}
// buildDepsOnlyEnv;
}

View File

@@ -1,77 +0,0 @@
{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
in
rec {
defaultDisabledFeatures = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
# we don't want to enable this feature set by default but be more specific about it
"full"
];
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
calcFeatures =
{
tomlPath ? "${inputs.self}/src/main",
# either a list of feature names or a string "all" which enables all non-default features
enabledFeatures ? [ ],
disabledFeatures ? defaultDisabledFeatures,
default_features ? true,
disable_release_max_log_level ? false,
}:
let
# simple helper to get the contents of a Cargo.toml file in a nix format
getToml = path: lib.importTOML "${path}/Cargo.toml";
# get all the features except for the default features
allFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features)
lib.attrNames
(lib.remove "default")
];
# get just the default enabled features
allDefaultFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features.default)
];
# depending on the value of enabledFeatures choose just a set or all non-default features
#
# - [ list of features ] -> choose exactly the features listed
# - "all" -> choose all non-default features
additionalFeatures = if enabledFeatures == "all" then allFeatures else enabledFeatures;
# unification with default features (if enabled)
features = lib.unique (additionalFeatures ++ lib.optionals default_features allDefaultFeatures);
# prepare the features that are subtracted from the set
disabledFeatures' =
disabledFeatures ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
# construct the final feature set
finalFeatures = lib.subtractLists disabledFeatures' features;
in
{
# final feature set, useful for querying it
features = finalFeatures;
# crane flag with the relevant features
cargoExtraArgs = builtins.concatStringsSep " " [
"--no-default-features"
"--locked"
(lib.optionalString (finalFeatures != [ ]) "--features")
(builtins.concatStringsSep "," finalFeatures)
];
};
}

View File

@@ -1,29 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
in
{
# basic nix shell containing all things necessary to build continuwuity in all flavors manually (on x86_64-linux)
devShells.default = uwulib.build.craneLib.devShell {
packages = [
pkgs.pkg-config
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
env.LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
};
};
}

View File

@@ -1,124 +0,0 @@
{
perSystem =
{
self',
lib,
pkgs,
...
}:
{
# run some nixos tests as checks
checks = lib.pipe self'.packages [
# we take all packages (names)
builtins.attrNames
# we filter out all packages that end with `-bin` (which we are interested in for testing)
(builtins.filter (lib.hasSuffix "-bin"))
# for each of these binaries we built the basic nixos test
#
# this test was initially yoinked from
#
# https://github.com/NixOS/nixpkgs/blob/960ce26339661b1b69c6f12b9063ca51b688615f/nixos/tests/matrix/continuwuity.nix
(builtins.map (name: {
name = "test-${name}";
value = pkgs.testers.runNixOSTest {
inherit name;
nodes = {
continuwuity = {
services.matrix-continuwuity = {
enable = true;
package = self'.packages.${name};
settings.global = {
server_name = name;
address = [ "0.0.0.0" ];
allow_registration = true;
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true;
};
extraEnvironment.RUST_BACKTRACE = "yes";
};
networking.firewall.allowedTCPPorts = [ 6167 ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
import asyncio
import nio
async def main() -> None:
# Connect to continuwuity
client = nio.AsyncClient("http://continuwuity:6167", "alice")
# Register as user alice
response = await client.register("alice", "my-secret-password")
# Log in as user alice
response = await client.login("my-secret-password")
# Create a new room
response = await client.room_create(federate=False)
print("Matrix room create response:", response)
assert isinstance(response, nio.RoomCreateResponse)
room_id = response.room_id
# Join the room
response = await client.join(room_id)
print("Matrix join response:", response)
assert isinstance(response, nio.JoinResponse)
# Send a message to the room
response = await client.room_send(
room_id=room_id,
message_type="m.room.message",
content={
"msgtype": "m.text",
"body": "Hello continuwuity!"
}
)
print("Matrix room send response:", response)
assert isinstance(response, nio.RoomSendResponse)
# Sync responses
response = await client.sync(timeout=30000)
print("Matrix sync response:", response)
assert isinstance(response, nio.SyncResponse)
# Check the message was received by continuwuity
last_message = response.rooms.join[room_id].timeline.events[-1].body
assert last_message == "Hello continuwuity!"
# Leave the room
response = await client.room_leave(room_id)
print("Matrix room leave response:", response)
assert isinstance(response, nio.RoomLeaveResponse)
# Close the client
await client.close()
if __name__ == "__main__":
asyncio.run(main())
'')
];
};
};
testScript = ''
start_all()
with subtest("start continuwuity"):
continuwuity.wait_for_unit("continuwuity.service")
continuwuity.wait_for_open_port(6167)
with subtest("ensure messages can be exchanged"):
client.succeed("do_test >&2")
'';
};
}))
builtins.listToAttrs
];
};
}

View File

@@ -12,14 +12,13 @@ Group=conduwuit
Type=notify-reload
ReloadSignal=SIGUSR1
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
ExecStart=/usr/bin/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
@@ -53,9 +52,8 @@ SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
# ConfigurationDirectory isn't specified here because it's created by
# the distro's package manager.
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

View File

@@ -51,7 +51,7 @@ find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
@@ -60,7 +60,7 @@ install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/con
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config(noreplace) %{_sysconfdir}/conduwuit/conduwuit.toml
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service

View File

@@ -0,0 +1,83 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic
{
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
//
(
let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
)

View File

@@ -0,0 +1,224 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature: builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building it's
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does, but
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its depedents.
jq
];
};
in
craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})

View File

@@ -10,19 +10,15 @@
"nix": {
"enabled": true
},
"pre-commit": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"rustyline-async",
"event-listener",
"async-channel",
"core_affinity",
"hyper-util"
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
@@ -68,8 +64,12 @@
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/renovatebot/renovate"],
"automerge": true,
"automergeStrategy": "fast-forward",
"extends": ["schedule:earlyMondays"]
"automergeStrategy": "fast-forward"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
}
],
"customManagers": [
@@ -81,7 +81,7 @@
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s+(?:(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_CHECKSUM[ =][\"']?(?<currentDigest>.+?)[\"']?\\s)?"
"# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV|ARG)\\s+[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s"
]
}
]

View File

@@ -85,7 +85,7 @@ futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

View File

@@ -16,7 +16,7 @@ pub(super) async fn register(&self) -> Result {
let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_saphyr::from_str(&appservice_config_body);
let parsed_config = serde_yml::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
@@ -57,7 +57,7 @@ pub(super) async fn show_appservice_config(&self, appservice_identifier: String)
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_saphyr::to_string(&config)?;
let config_str = serde_yml::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
}
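For context, a minimal sketch of the YAML round-trip this admin command performs, using the `serde_saphyr::from_str` and `serde_saphyr::to_string` calls shown above. The `Registration` struct here is a simplified stand-in for the real appservice registration type, and the sample values are made up:

use serde::{Deserialize, Serialize};

// Simplified stand-in for the appservice registration type (illustration only).
#[derive(Debug, Serialize, Deserialize)]
struct Registration {
    id: String,
    url: String,
    as_token: String,
    hs_token: String,
    sender_localpart: String,
}

fn main() {
    let body = "\
id: my-bridge
url: http://localhost:9000
as_token: fake-as-token
hs_token: fake-hs-token
sender_localpart: bridgebot
";
    // Parse the registration body, as the `register` command does above.
    let reg: Registration = serde_saphyr::from_str(body).expect("valid registration YAML");
    // Serialise it back, as `show_appservice_config` does above.
    let yaml = serde_saphyr::to_string(&reg).expect("serialisable registration");
    println!("{yaml}");
}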

View File

@@ -2,8 +2,7 @@
use conduwuit::{
Err, Result, debug, debug_info, debug_warn, error, info, trace,
utils::time::{TimeDirection, parse_timepoint_ago},
warn,
utils::time::parse_timepoint_ago, warn,
};
use conduwuit_service::media::Dim;
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
@@ -236,19 +235,14 @@ pub(super) async fn delete_past_remote_media(
}
assert!(!(before && after), "--before and --after should not be specified together");
let direction = if after {
TimeDirection::After
} else {
TimeDirection::Before
};
let time_boundary = parse_timepoint_ago(&duration)?;
let duration = parse_timepoint_ago(&duration)?;
let deleted_count = self
.services
.media
.delete_all_media_within_timeframe(
time_boundary,
direction,
.delete_all_remote_media_at_after_time(
duration,
before,
after,
yes_i_want_to_delete_local_media,
)
.await?;

View File

@@ -27,24 +27,12 @@ pub enum MediaCommand {
/// filesystem. This will always ignore errors.
DeleteList,
/// Deletes all remote (and optionally local) media created before/after
/// [duration] ago, using filesystem metadata first created at date, or
/// fallback to last modified date. This will always ignore errors by
/// default.
///
/// * Examples:
/// * Delete all remote media older than a year:
///
/// `!admin media delete-past-remote-media -b 1y`
///
/// * Delete all remote and local media from 3 days ago, up until now:
///
/// `!admin media delete-past-remote-media -a 3d
/// --yes-i-want-to-delete-local-media`
#[command(verbatim_doc_comment)]
/// - Deletes all remote (and optionally local) media created before or
/// after [duration] time using filesystem metadata first created at date,
/// or fallback to last modified date. This will always ignore errors by
/// default.
DeletePastRemoteMedia {
/// - The relative time (e.g. 30s, 5m, 7d) from now within which to
/// search
/// - The relative time (e.g. 30s, 5m, 7d) within which to search
duration: String,
/// - Only delete media created before [duration] ago

View File

@@ -64,14 +64,10 @@ pub(crate) async fn create_content_route(
media_id: &utils::random_string(MXC_LENGTH),
};
if let Err(e) = services
services
.media
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await
{
err!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}
.await?;
let blurhash = body.generate_blurhash.then(|| {
services

View File

@@ -97,12 +97,11 @@ pub(crate) async fn upgrade_room_route(
// Create a replacement room
let room_features = RoomVersion::new(&body.new_version)?;
let replacement_room_owned = if !room_features.room_ids_as_hashes {
Some(RoomId::new(services.globals.server_name()))
} else {
let replacement_room: Option<&RoomId> = if room_features.room_ids_as_hashes {
None
} else {
Some(&RoomId::new(services.globals.server_name()))
};
let replacement_room: Option<&RoomId> = replacement_room_owned.as_ref().map(AsRef::as_ref);
let replacement_room_tmp = match replacement_room {
| Some(v) => v,
| None => &RoomId::new(services.globals.server_name()),

View File

@@ -44,7 +44,7 @@ pub(crate) async fn get_hierarchy_route(
.as_ref()
.and_then(|s| PaginationToken::from_str(s).ok());
// Should prevent unexpected behaviour in (bad) clients
// Should prevent unexpeded behaviour in (bad) clients
if let Some(ref token) = key {
if token.suggested_only != body.suggested_only || token.max_depth != max_depth {
return Err!(Request(InvalidParam(

View File

@@ -320,7 +320,6 @@ async fn handle_lists<'a, Rooms, AllRooms>(
for mut range in ranges {
range.0 = uint!(0);
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
range.1 = range
.1
.clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));

View File

@@ -78,7 +78,7 @@ pub(crate) async fn well_known_support(
while let Some(user_id) = stream.next().await {
// Skip server user
if *user_id == services.globals.server_user {
continue;
break;
}
contacts.push(Contact {
role: role_value.clone(),

View File

@@ -226,7 +226,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&server::well_known_server)
.ruma_route(&server::get_content_route)
.ruma_route(&server::get_content_thumbnail_route)
.ruma_route(&server::get_edutypes_route)
.route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count))
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
} else {

View File

@@ -34,19 +34,6 @@ pub(super) async fn from(
let max_body_size = services.server.config.max_request_size;
// Check if the Content-Length header is present and valid, saves us streaming
// the response into memory
if let Some(content_length) = parts.headers.get(http::header::CONTENT_LENGTH) {
if let Ok(content_length) = content_length
.to_str()
.map(|s| s.parse::<usize>().unwrap_or_default())
{
if content_length > max_body_size {
return Err(err!(Request(TooLarge("Request body too large"))));
}
}
}
let body = axum::body::to_bytes(body, max_body_size)
.await
.map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?;
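For reference, the same early-rejection guard written as a standalone helper; a sketch using the `http` crate types already in scope here, where an oversized declared length lets the caller bail out before buffering the body:

use http::{HeaderMap, header::CONTENT_LENGTH};

/// Returns true when the request declares a Content-Length larger than
/// `max_body_size`, so the caller can reject it without reading the body.
fn declared_length_too_large(headers: &HeaderMap, max_body_size: usize) -> bool {
    headers
        .get(CONTENT_LENGTH)
        .and_then(|value| value.to_str().ok())
        .and_then(|value| value.parse::<usize>().ok())
        .is_some_and(|length| length > max_body_size)
}

Requests without the header, or with an unparseable value, simply fall through to the normal size-limited `to_bytes` call.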

View File

@@ -1,19 +0,0 @@
use axum::extract::State;
use conduwuit::Result;
use ruma::api::federation::edutypes::get_edutypes;
use crate::Ruma;
/// # `GET /_matrix/federation/v1/edutypes`
///
/// Lists EDU types we wish to receive
pub(crate) async fn get_edutypes_route(
State(services): State<crate::State>,
_body: Ruma<get_edutypes::unstable::Request>,
) -> Result<get_edutypes::unstable::Response> {
Ok(get_edutypes::unstable::Response {
typing: services.config.allow_incoming_typing,
presence: services.config.allow_incoming_presence,
receipt: services.config.allow_incoming_read_receipts,
})
}

View File

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Result, debug_info, info, matrix::pdu::PduBuilder, utils::IterStream, warn,
Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn,
};
use conduwuit_service::Services;
use futures::StreamExt;
@@ -22,7 +22,6 @@
/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}`
///
/// Creates a join template.
#[tracing::instrument(skip_all, fields(room_id = %body.room_id, user_id = %body.user_id, origin = %body.origin()))]
pub(crate) async fn create_join_event_template_route(
State(services): State<crate::State>,
body: Ruma<prepare_join_event::v1::Request>,
@@ -73,16 +72,11 @@ pub(crate) async fn create_join_event_template_route(
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let is_invited = services
.rooms
.state_cache
.is_invited(&body.user_id, &body.room_id)
.await;
let join_authorized_via_users_server: Option<OwnedUserId> = {
use RoomVersionId::*;
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) || is_invited {
// room version does not support restricted join rules, or the user is currently
// already invited
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
// room version does not support restricted join rules
None
} else if user_can_perform_restricted_join(
&services,
@@ -109,10 +103,6 @@ pub(crate) async fn create_join_event_template_route(
.await
.map(ToOwned::to_owned)
else {
info!(
"No local user is able to authorize the join of {} into {}",
&body.user_id, &body.room_id
);
return Err!(Request(UnableToGrantJoin(
"No user on this server is able to assist in joining."
)));
@@ -177,7 +167,6 @@ pub(crate) async fn user_can_perform_restricted_join(
)
.await
else {
// No join rules means there's nothing to authorise (defaults to invite)
return Ok(false);
};

View File

@@ -1,5 +1,4 @@
pub(super) mod backfill;
pub(super) mod edutypes;
pub(super) mod event;
pub(super) mod event_auth;
pub(super) mod get_missing_events;
@@ -24,7 +23,6 @@
pub(super) mod well_known;
pub(super) use backfill::*;
pub(super) use edutypes::*;
pub(super) use event::*;
pub(super) use event_auth::*;
pub(super) use get_missing_events::*;

View File

@@ -92,7 +92,7 @@ ruma.workspace = true
sanitize-filename.workspace = true
serde_json.workspace = true
serde_regex.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
serde.workspace = true
smallvec.workspace = true
smallstr.workspace = true

View File

@@ -1128,23 +1128,6 @@ pub struct Config {
#[serde(default = "true_fn")]
pub rocksdb_bottommost_compression: bool,
/// Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
///
/// At present, only ZSTD compression is supported by RocksDB for WAL
/// compression. Enabling this can reduce WAL size at the expense of some
/// CPU usage during writes.
///
/// The options are:
/// - "none" = No compression
/// - "zstd" = ZSTD compression
///
/// For more information on WAL compression, see:
/// https://github.com/facebook/rocksdb/wiki/WAL-Compression
///
/// default: "zstd"
#[serde(default = "default_rocksdb_wal_compression")]
pub rocksdb_wal_compression: String,
/// Database recovery mode (for RocksDB WAL corruption).
///
/// Use this option when the server reports corruption and refuses to start.
@@ -1727,19 +1710,6 @@ pub struct Config {
#[serde(default)]
pub block_non_admin_invites: bool,
/// Enable or disable making requests to MSC4284 Policy Servers.
/// It is recommended you keep this enabled unless you experience frequent
/// connectivity issues, such as in a restricted networking environment.
#[serde(default = "true_fn")]
pub enable_msc4284_policy_servers: bool,
/// Enable running locally generated events through configured MSC4284
/// policy servers. You may wish to disable this if your server is
/// single-user for a slight speed benefit in some rooms, but otherwise
/// should leave it enabled.
#[serde(default = "true_fn")]
pub policy_server_check_own_events: bool,
/// Allow admins to enter commands in rooms other than "#admins" (admin
/// room) by prefixing your message with "\!admin" or "\\!admin" followed up
/// a normal continuwuity admin command. The reply will be publicly visible
@@ -2471,8 +2441,6 @@ fn default_rocksdb_compression_algo() -> String {
.to_owned()
}
fn default_rocksdb_wal_compression() -> String { "zstd".to_owned() }
/// Default RocksDB compression level is 32767, which is internally read by
/// RocksDB as the default magic number and translated to the library's default
/// compression level as they all differ. See their `kDefaultCompressionLevel`.

View File

@@ -83,9 +83,7 @@ pub enum Error {
#[error(transparent)]
TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection),
#[error(transparent)]
YamlDe(#[from] serde_saphyr::Error),
#[error(transparent)]
YamlSer(#[from] serde_saphyr::ser_error::Error),
Yaml(#[from] serde_yml::Error),
// ruma/conduwuit
#[error("Arithmetic operation failed: {0}")]

View File

@@ -5,17 +5,13 @@
use std::{collections::BTreeMap, sync::OnceLock};
use crate::utils::exchange;
use crate::{SyncMutex, utils::exchange};
/// Raw capture of rustc flags used to build each crate in the project. Informed
/// by rustc_flags_capture macro (one in each crate's mod.rs). This is
/// done during static initialization which is why it's mutex-protected and pub.
/// Should not be written to by anything other than our macro.
///
/// We specifically use a std mutex here because parking_lot cannot be used
/// after thread local storage is destroyed on MacOS.
pub static FLAGS: std::sync::Mutex<BTreeMap<&str, &[&str]>> =
std::sync::Mutex::new(BTreeMap::new());
pub static FLAGS: SyncMutex<BTreeMap<&str, &[&str]>> = SyncMutex::new(BTreeMap::new());
/// Processed list of enabled features across all project crates. This is
/// generated from the data in FLAGS.
@@ -28,7 +24,6 @@ fn init_features() -> Vec<&'static str> {
let mut features = Vec::new();
FLAGS
.lock()
.expect("locked")
.iter()
.for_each(|(_, flags)| append_features(&mut features, flags));

View File

@@ -200,15 +200,11 @@ pub async fn auth_check<E, F, Fut>(
if incoming_event.room_id().is_some() {
let Some(room_id_server_name) = incoming_event.room_id().unwrap().server_name()
else {
warn!("legacy room ID has no server name");
warn!("room ID has no servername");
return Ok(false);
};
if room_id_server_name != sender.server_name() {
warn!(
expected = %sender.server_name(),
received = %room_id_server_name,
"server name of legacy room ID does not match server name of sender"
);
warn!("servername of room ID does not match servername of sender");
return Ok(false);
}
}
@@ -219,12 +215,12 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!("unsupported room version found in m.room.create event");
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
if room_version.room_ids_as_hashes && incoming_event.room_id().is_some() {
warn!("room create event incorrectly claims to have a room ID when it should not");
warn!("room create event incorrectly claims a room ID");
return Ok(false);
}
@@ -233,7 +229,7 @@ pub async fn auth_check<E, F, Fut>(
{
// If content has no creator field, reject
if content.creator.is_none() {
warn!("m.room.create event incorrectly omits 'creator' field");
warn!("no creator field found in m.room.create content");
return Ok(false);
}
}
@@ -286,19 +282,16 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!(
create_event_id = %room_create_event.event_id(),
"unsupported room version found in m.room.create event"
);
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
let expected_room_id = room_create_event.room_id_or_hash();
if incoming_event.room_id().expect("event must have a room ID") != expected_room_id {
if incoming_event.room_id().unwrap() != expected_room_id {
warn!(
expected = %expected_room_id,
received = %incoming_event.room_id().unwrap(),
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
incoming_event.room_id().unwrap(),
expected_room_id,
);
@@ -311,15 +304,12 @@ pub async fn auth_check<E, F, Fut>(
.auth_events()
.any(|id| id == room_create_event.event_id());
if room_version.room_ids_as_hashes && claims_create_event {
warn!("event incorrectly references m.room.create event in auth events");
warn!("m.room.create event incorrectly found in auth events");
return Ok(false);
} else if !room_version.room_ids_as_hashes && !claims_create_event {
// If the create event is not referenced in the event's auth events, and this is
// a v11 room, reject
warn!(
missing = %room_create_event.event_id(),
"event incorrectly did not reference an m.room.create in its auth events"
);
warn!("no m.room.create event found in auth events");
return Ok(false);
}
@@ -328,7 +318,7 @@ pub async fn auth_check<E, F, Fut>(
warn!(
expected = %expected_room_id,
received = %pe.room_id().unwrap(),
"room_id of referenced power levels event does not match that of the m.room.create event"
"room_id of power levels event does not match room_id of m.room.create event"
);
return Ok(false);
}
@@ -342,9 +332,8 @@ pub async fn auth_check<E, F, Fut>(
&& room_create_event.sender().server_name() != incoming_event.sender().server_name()
{
warn!(
sender = %incoming_event.sender(),
create_sender = %room_create_event.sender(),
"room is not federated and event's sender domain does not match create event's sender domain"
"room is not federated and event's sender domain does not match create event's \
sender domain"
);
return Ok(false);
}
@@ -427,6 +416,7 @@ pub async fn auth_check<E, F, Fut>(
&user_for_join_auth_membership,
&room_create_event,
)? {
warn!("membership change not valid for some reason");
return Ok(false);
}
@@ -439,7 +429,7 @@ pub async fn auth_check<E, F, Fut>(
let sender_member_event = match sender_member_event {
| Some(mem) => mem,
| None => {
warn!("sender has no membership event");
warn!("sender not found in room");
return Ok(false);
},
};
@@ -450,7 +440,7 @@ pub async fn auth_check<E, F, Fut>(
!= expected_room_id
{
warn!(
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
sender_member_event
.room_id()
.expect("event must have a room ID"),
@@ -463,7 +453,8 @@ pub async fn auth_check<E, F, Fut>(
from_json_str(sender_member_event.content().get())?;
let Some(membership_state) = sender_membership_event_content.membership else {
warn!(
?sender_membership_event_content,
sender_membership_event_content = format!("{sender_membership_event_content:?}"),
event_id = format!("{}", incoming_event.event_id()),
"Sender membership event content missing membership field"
);
return Err(Error::InvalidPdu("Missing membership field".to_owned()));
@@ -471,11 +462,7 @@ pub async fn auth_check<E, F, Fut>(
let membership_state = membership_state.deserialize()?;
if !matches!(membership_state, MembershipState::Join) {
warn!(
%sender,
?membership_state,
"sender cannot send events without being joined to the room"
);
warn!("sender's membership is not join");
return Ok(false);
}
@@ -535,12 +522,7 @@ pub async fn auth_check<E, F, Fut>(
};
if sender_power_level < invite_level {
warn!(
%sender,
has=?sender_power_level,
required=?invite_level,
"sender cannot send invites in this room"
);
warn!("sender's cannot send invites in this room");
return Ok(false);
}
@@ -552,11 +534,7 @@ pub async fn auth_check<E, F, Fut>(
// level, reject If the event has a state_key that starts with an @ and does
// not match the sender, reject.
if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) {
warn!(
%sender,
event_type=?incoming_event.kind(),
"sender cannot send event"
);
warn!("user cannot send event");
return Ok(false);
}
@@ -601,12 +579,6 @@ pub async fn auth_check<E, F, Fut>(
};
if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? {
warn!(
%sender,
?sender_power_level,
?redact_level,
"redaction event was not allowed"
);
return Ok(false);
}
}
@@ -615,21 +587,15 @@ pub async fn auth_check<E, F, Fut>(
Ok(true)
}
fn is_creator<EV>(
v: &RoomVersion,
c: &BTreeSet<OwnedUserId>,
ce: &EV,
user_id: &UserId,
have_pls: bool,
) -> bool
fn is_creator<EV>(v: &RoomVersion, c: &BTreeSet<OwnedUserId>, ce: &EV, user_id: &UserId) -> bool
where
EV: Event + Send + Sync,
{
if v.explicitly_privilege_room_creators {
c.contains(user_id)
} else if v.use_room_create_sender && !have_pls {
} else if v.use_room_create_sender {
ce.sender() == user_id
} else if !have_pls {
} else {
#[allow(deprecated)]
let creator = from_json_str::<RoomCreateEventContent>(ce.content().get())
.unwrap()
@@ -638,8 +604,6 @@ fn is_creator<EV>(
.unwrap();
creator == user_id
} else {
false
}
}
@@ -732,11 +696,10 @@ struct GetThirdPartyInvite {
}
trace!(?creators, "creators for room");
let join_rules = if let Some(jr) = &join_rules_event {
from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule
} else {
JoinRule::Invite
};
let mut join_rules = JoinRule::Invite;
if let Some(jr) = &join_rules_event {
join_rules = from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule;
}
let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id);
let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id);
@@ -762,13 +725,8 @@ struct GetThirdPartyInvite {
(int!(0), int!(0))
};
let user_joined = user_for_join_auth_membership == &MembershipState::Join;
let okay_power = is_creator(
room_version,
&creators,
create_room,
user_for_join_auth,
power_levels_event.as_ref().is_some(),
) || auth_user_pl >= invite_level;
let okay_power = is_creator(room_version, &creators, create_room, user_for_join_auth)
|| auth_user_pl >= invite_level;
trace!(
auth_user_pl=?auth_user_pl,
invite_level=?invite_level,
@@ -783,20 +741,8 @@ struct GetThirdPartyInvite {
trace!("No auth user given for join auth");
false
};
let sender_creator = is_creator(
room_version,
&creators,
create_room,
sender,
power_levels_event.as_ref().is_some(),
);
let target_creator = is_creator(
room_version,
&creators,
create_room,
target_user,
power_levels_event.as_ref().is_some(),
);
let sender_creator = is_creator(room_version, &creators, create_room, sender);
let target_creator = is_creator(room_version, &creators, create_room, target_user);
Ok(match target_membership {
| MembershipState::Join => {
@@ -813,7 +759,7 @@ struct GetThirdPartyInvite {
if prev_event_is_create_event && no_more_prev_events {
trace!(
%sender,
sender = %sender,
target_user = %target_user,
?sender_creator,
?target_creator,
@@ -833,33 +779,22 @@ struct GetThirdPartyInvite {
);
if sender != target_user {
// If the sender does not match state_key, reject.
warn!(
%sender,
target_user = %target_user,
"sender cannot join on behalf of another user"
);
warn!("Can't make other user join");
false
} else if target_user_current_membership == MembershipState::Ban {
// If the sender is banned, reject.
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
"sender cannot join as they are banned from the room"
);
warn!(?target_user_membership_event_id, "Banned user can't join");
false
} else {
match join_rules {
| JoinRule::Invite =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership = ?target_user_current_membership,
"sender cannot join as they are not invited to the invite-only room"
membership=?target_user_current_membership,
"Join rule is invite but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited to room, allowing join");
true
},
| JoinRule::Knock if !room_version.allow_knocking => {
@@ -869,14 +804,11 @@ struct GetThirdPartyInvite {
| JoinRule::Knock =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
"sender cannot join a knock room without being invited or already joined"
"Join rule is knock but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited or already joined to room, allowing join");
true
},
| JoinRule::KnockRestricted(_) if !room_version.knock_restricted_join_rule =>
@@ -888,55 +820,33 @@ struct GetThirdPartyInvite {
},
| JoinRule::KnockRestricted(_) => {
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
true
} else {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one, but no valid authorising user \
was given and the sender's current membership does not permit \
a join transition"
);
false
}
},
| JoinRule::Restricted(_) =>
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
true
} else {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one but no valid authorising user \
was given"
);
false
},
| JoinRule::Public => {
trace!(%sender, "join rule is public, allowing join");
true
},
| JoinRule::Public => true,
| _ => {
warn!(
join_rule=?join_rules,
"Join rule is unknown, or the rule's conditions were not met"
membership=?target_user_current_membership,
"Unknown join rule doesn't allow joining, or the rule's conditions were not met"
);
false
},
@@ -963,23 +873,16 @@ struct GetThirdPartyInvite {
}
allow
},
| _ =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
?sender_membership,
"sender cannot produce an invite without being joined to the room",
);
false
} else if matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Ban
) {
| _ => {
if !sender_is_joined
|| target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Ban
{
warn!(
?target_user_membership_event_id,
?target_user_current_membership,
"cannot invite a user who is banned or already joined",
?sender_membership_event_id,
"Can't invite user if sender not joined or the user is currently \
joined or banned",
);
false
} else {
@@ -989,124 +892,56 @@ struct GetThirdPartyInvite {
.is_some();
if !allow {
warn!(
%sender,
has=?sender_power,
required=?power_levels.invite,
"sender does not have enough power to produce invites",
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to invite",
);
}
trace!(
%sender,
?sender_membership_event_id,
?sender_membership,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
required_pl=?power_levels.invite,
"allowing invite"
);
allow
},
}
},
}
},
| MembershipState::Leave => {
let can_unban = if target_user_current_membership == MembershipState::Ban {
sender_creator || sender_power.filter(|&p| p >= &power_levels.ban).is_some()
} else {
true
};
let can_kick = if !matches!(
target_user_current_membership,
MembershipState::Ban | MembershipState::Leave
) {
if sender_creator {
// sender is a creator
true
} else if sender_power.filter(|&p| p >= &power_levels.kick).is_none() {
// sender lacks kick power level
false
} else if let Some(sp) = sender_power {
if let Some(tp) = target_power {
// sender must have more power than target
sp > tp
} else {
// target has default power level
true
}
} else {
// sender has default power level
false
}
} else {
true
};
| MembershipState::Leave =>
if sender == target_user {
// self-leave
// let allow = target_user_current_membership == MembershipState::Join
// || target_user_current_membership == MembershipState::Invite
// || target_user_current_membership == MembershipState::Knock;
let allow = matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Invite | MembershipState::Knock
);
let allow = target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite
|| target_user_current_membership == MembershipState::Knock;
if !allow {
warn!(
%sender,
current_membership_event_id=?target_user_membership_event_id,
current_membership=?target_user_current_membership,
"sender cannot leave as they are not already knocking on, invited to, or joined to the room"
?target_user_membership_event_id,
?target_user_current_membership,
"Can't leave if sender is not already invited, knocked, or joined"
);
}
trace!(sender=%sender, "allowing leave");
allow
} else if !sender_is_joined {
} else if !sender_is_joined
|| target_user_current_membership == MembershipState::Ban
&& (sender_creator
|| sender_power.filter(|&p| p < &power_levels.ban).is_some())
{
warn!(
%sender,
?target_user_membership_event_id,
?sender_membership_event_id,
"sender cannot kick another user as they are not joined to the room",
);
false
} else if !(can_unban && can_kick) {
// If the target is banned, only a room creator or someone with ban power
// level can unban them
warn!(
%sender,
?target_user_membership_event_id,
?power_levels_event_id,
"sender lacks the power level required to unban users",
);
false
} else if !can_kick {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
?power_levels_event_id,
"sender does not have enough power to kick the target",
"Can't kick if sender not joined or user is already banned",
);
false
} else {
trace!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
target_pl=?target_power,
required_pl=?power_levels.kick,
"allowing kick/unban",
);
true
}
},
let allow = sender_creator
|| (sender_power.filter(|&p| p >= &power_levels.kick).is_some()
&& target_power < sender_power);
if !allow {
warn!(
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to kick",
);
}
allow
},
| MembershipState::Ban =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
"sender cannot ban another user as they are not joined to the room",
);
warn!(?sender_membership_event_id, "Can't ban user if sender is not joined");
false
} else {
let allow = sender_creator
@@ -1114,11 +949,9 @@ struct GetThirdPartyInvite {
&& target_power < sender_power);
if !allow {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?power_levels_event_id,
"sender does not have enough power to ban the target",
"User does not have enough power to ban",
);
}
allow
@@ -1144,9 +977,9 @@ struct GetThirdPartyInvite {
} else if sender != target_user {
// 3. If `sender` does not match `state_key`, reject.
warn!(
%sender,
%target_user,
"sender cannot knock on behalf of another user",
?sender,
?target_user,
"Can't make another user knock, sender did not match target"
);
false
} else if matches!(
@@ -1158,25 +991,15 @@ struct GetThirdPartyInvite {
// 5. Otherwise, reject.
warn!(
?target_user_membership_event_id,
?sender_membership,
"Knocking with a membership state of ban, invite or join is invalid",
);
false
} else {
trace!(%sender, "allowing knock");
true
}
},
| _ => {
warn!(
%sender,
?target_membership,
%target_user,
%target_user_current_membership,
"Unknown or invalid membership transition {} -> {}",
target_user_current_membership,
target_membership
);
warn!("Unknown membership transition");
false
},
})
@@ -1206,13 +1029,6 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
if event.state_key().is_some_and(|k| k.starts_with('@'))
&& event.state_key() != Some(event.sender().as_str())
{
warn!(
%user_level,
required=?event_type_power_level,
state_key=?event.state_key(),
sender=%event.sender(),
"state_key starts with @ but does not match sender",
);
return false; // permission required to post in this room
}
@@ -1297,14 +1113,7 @@ fn check_power_levels(
// If the current value is equal to the sender's current power level, reject
if user != power_event.sender() && old_level == Some(&user_level) {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with the same power level as sender's own"
);
warn!("m.room.power_level cannot remove ops == to own");
return Some(false); // cannot remove ops level == to own
}
@@ -1312,26 +1121,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of a user to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1348,26 +1139,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of an event with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of an event to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1382,13 +1155,7 @@ fn check_power_levels(
let old_level_too_big = old_level > user_level;
let new_level_too_big = new_level > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_level,
?new_level,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of notifications greater than sender's own"
);
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1412,14 +1179,7 @@ fn check_power_levels(
let new_level_too_big = new_lvl > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_lvl,
?new_lvl,
%user_level,
sender=%power_event.sender(),
action=%lvl_name,
"cannot alter the power level of action greater than sender's own",
);
warn!("cannot add ops > than own");
return Some(false);
}
}

View File

@@ -36,7 +36,7 @@
room_version::RoomVersion,
};
use crate::{
debug, debug_error, err,
debug, debug_error,
matrix::{Event, StateKey},
state_res::room_version::StateResolutionVersion,
trace,
@@ -217,8 +217,14 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
)
.await?;
// Ensure unconflicting state is in the final state
// Add unconflicted state to the resolved state
// We priorities the unconflicting state
resolved_state.extend(clean);
if stateres_version == StateResolutionVersion::V2_1 {
resolved_state.extend(resolved_control);
// TODO(hydra): this feels disgusting and wrong but it allows
// the state to resolve properly?
}
debug!("state resolution finished");
trace!( map = ?resolved_state, "final resolved state" );
@@ -313,19 +319,8 @@ async fn calculate_conflicted_subgraph<F, Fut, E>(
path.pop();
continue;
}
trace!(event_id = event_id.as_str(), "fetching event for its auth events");
let evt = fetch_event(event_id.clone()).await;
if evt.is_none() {
err!("could not fetch event {} to calculate conflicted subgraph", event_id);
path.pop();
continue;
}
stack.push(
evt.expect("checked")
.auth_events()
.map(ToOwned::to_owned)
.collect(),
);
let evt = fetch_event(event_id.clone()).await?;
stack.push(evt.auth_events().map(ToOwned::to_owned).collect());
seen.insert(event_id);
}
Some(subgraph)
@@ -421,8 +416,8 @@ async fn reverse_topological_power_sort<E, F, Fut>(
/// `key_fn` is used as to obtain the power level and age of an event for
/// breaking ties (together with the event ID).
#[tracing::instrument(level = "debug", skip_all)]
pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher, S>(
graph: &HashMap<Id, HashSet<Id, Hasher>, S>,
pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher>(
graph: &HashMap<Id, HashSet<Id, Hasher>>,
key_fn: &F,
) -> Result<Vec<Id>>
where
@@ -430,7 +425,6 @@ pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher, S>(
Fut: Future<Output = Result<(Int, MilliSecondsSinceUnixEpoch)>> + Send,
Id: Borrow<EventId> + Clone + Eq + Hash + Ord + Send + Sync,
Hasher: BuildHasher + Default + Clone + Send + Sync,
S: BuildHasher + Clone + Send + Sync,
{
#[derive(PartialEq, Eq)]
struct TieBreaker<'a, Id> {

View File

@@ -276,22 +276,3 @@ async fn set_intersection_sorted_stream2() {
.await;
assert!(r.eq(&["ccc", "ggg", "iii"]));
}
#[test]
fn is_within_bounds() {
use std::time::{Duration, SystemTime};
use utils::time::{TimeDirection, is_within_bounds};
let now = SystemTime::now();
let yesterday = now - Duration::from_secs(86400);
assert!(is_within_bounds(yesterday, now, TimeDirection::Before));
assert!(!is_within_bounds(yesterday, now, TimeDirection::After));
let tomorrow = now + Duration::from_secs(86400);
assert!(is_within_bounds(tomorrow, now, TimeDirection::After));
assert!(!is_within_bounds(tomorrow, now, TimeDirection::Before));
assert!(is_within_bounds(now, now, TimeDirection::Before));
assert!(is_within_bounds(now, now, TimeDirection::After));
}

View File

@@ -126,24 +126,3 @@ pub enum Unit {
Micros(u128),
Nanos(u128),
}
#[derive(Eq, PartialEq, Clone, Copy, Debug)]
pub enum TimeDirection {
Before,
After,
}
/// Checks if `item_time` is before or after `time_boundary`.
/// If both times are the same, it will return true for both directions, as the
/// matching is inclusive.
#[must_use]
pub fn is_within_bounds(
item_time: SystemTime,
time_boundary: SystemTime,
direction: TimeDirection,
) -> bool {
match direction {
| TimeDirection::Before => item_time <= time_boundary,
| TimeDirection::After => item_time >= time_boundary,
}
}
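As a rough illustration of how the media deletion command's --before/--after flags map onto this helper, here is a self-contained sketch; the enum and function are copied from the code above, while the file list and timestamps are invented:

use std::time::{Duration, SystemTime};

#[derive(Eq, PartialEq, Clone, Copy, Debug)]
enum TimeDirection {
    Before,
    After,
}

fn is_within_bounds(
    item_time: SystemTime,
    time_boundary: SystemTime,
    direction: TimeDirection,
) -> bool {
    match direction {
        TimeDirection::Before => item_time <= time_boundary,
        TimeDirection::After => item_time >= time_boundary,
    }
}

// Keep only the media keys whose creation time falls on the requested side
// of the boundary (Before for `-b`, After for `-a`).
fn select_for_deletion<'a>(
    files: &'a [(&'a str, SystemTime)],
    time_boundary: SystemTime,
    direction: TimeDirection,
) -> Vec<&'a str> {
    files
        .iter()
        .filter(|(_, created)| is_within_bounds(*created, time_boundary, direction))
        .map(|(key, _)| *key)
        .collect()
}

fn main() {
    let now = SystemTime::now();
    let files = [
        ("old-file", now - Duration::from_secs(90 * 86_400)),
        ("new-file", now - Duration::from_secs(86_400)),
    ];
    // `delete-past-remote-media -b 30d`: everything created before 30 days ago.
    let boundary = now - Duration::from_secs(30 * 86_400);
    let doomed = select_for_deletion(&files, boundary, TimeDirection::Before);
    assert_eq!(doomed, vec!["old-file"]);
}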

View File

@@ -1,9 +1,7 @@
use std::{cmp, convert::TryFrom};
use conduwuit::{Config, Result, utils, warn};
use rocksdb::{
Cache, DBCompressionType, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel,
};
use conduwuit::{Config, Result, utils};
use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
@@ -60,20 +58,6 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
opts.set_max_total_wal_size(1024 * 1024 * 512);
opts.set_writable_file_max_buffer_size(1024 * 1024 * 2);
// WAL compression
let wal_compression = match config.rocksdb_wal_compression.as_ref() {
| "zstd" => DBCompressionType::Zstd,
| "none" => DBCompressionType::None,
| value => {
warn!(
"Invalid rocksdb_wal_compression value '{value}'. Supported values are 'none' \
or 'zstd'. Defaulting to 'none'."
);
DBCompressionType::None
},
};
opts.set_wal_compression_type(wal_compression);
// Misc
opts.set_disable_auto_compactions(!config.rocksdb_compaction);
opts.create_missing_column_families(true);

View File

@@ -10,7 +10,7 @@
use futures::{Stream, StreamExt, TryStreamExt};
use rocksdb::{DBPinnableSlice, ReadOptions};
use super::get::handle_from;
use super::get::{cached_handle_from, handle_from};
use crate::Handle;
pub trait Get<'a, K, S>
@@ -58,6 +58,20 @@ pub(crate) fn get_batch<'a, S, K>(
.try_flatten()
}
#[implement(super::Map)]
#[tracing::instrument(name = "batch_cached", level = "trace", skip_all)]
pub(crate) fn get_batch_cached<'a, I, K>(
&self,
keys: I,
) -> impl Iterator<Item = Result<Option<Handle<'_>>>> + Send + use<'_, I, K>
where
I: Iterator<Item = &'a K> + ExactSizeIterator + Send,
K: AsRef<[u8]> + Send + ?Sized + Sync + 'a,
{
self.get_batch_blocking_opts(keys, &self.cache_read_options)
.map(cached_handle_from)
}
#[implement(super::Map)]
#[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)]
pub(crate) fn get_batch_blocking<'a, I, K>(

View File

@@ -184,7 +184,7 @@ fn spawn_one(
let handle = thread::Builder::new()
.name(WORKER_NAME.into())
.stack_size(WORKER_STACK_SIZE)
.spawn(move || self.worker(id, &recv))?;
.spawn(move || self.worker(id, recv))?;
workers.push(handle);
@@ -260,9 +260,9 @@ async fn execute(&self, queue: &Sender<Cmd>, cmd: Cmd) -> Result {
tid = ?thread::current().id(),
),
)]
fn worker(self: Arc<Self>, id: usize, recv: &Receiver<Cmd>) {
fn worker(self: Arc<Self>, id: usize, recv: Receiver<Cmd>) {
self.worker_init(id);
self.worker_loop(recv);
self.worker_loop(&recv);
}
#[implement(Pool)]
@@ -309,7 +309,7 @@ fn worker_loop(self: &Arc<Self>, recv: &Receiver<Cmd>) {
self.busy.fetch_add(1, Ordering::Relaxed);
while let Ok(cmd) = self.worker_wait(recv) {
Pool::worker_handle(cmd);
self.worker_handle(cmd);
}
}
@@ -331,11 +331,11 @@ fn worker_wait(self: &Arc<Self>, recv: &Receiver<Cmd>) -> Result<Cmd, RecvError>
}
#[implement(Pool)]
fn worker_handle(cmd: Cmd) {
fn worker_handle(self: &Arc<Self>, cmd: Cmd) {
match cmd {
| Cmd::Get(cmd) if cmd.key.len() == 1 => Pool::handle_get(cmd),
| Cmd::Get(cmd) => Pool::handle_batch(cmd),
| Cmd::Iter(cmd) => Pool::handle_iter(cmd),
| Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd),
| Cmd::Get(cmd) => self.handle_batch(cmd),
| Cmd::Iter(cmd) => self.handle_iter(cmd),
}
}
@@ -346,7 +346,7 @@ fn worker_handle(cmd: Cmd) {
skip_all,
fields(%cmd.map),
)]
fn handle_iter(mut cmd: Seek) {
fn handle_iter(&self, mut cmd: Seek) {
let chan = cmd.res.take().expect("missing result channel");
if chan.is_canceled() {
@@ -375,7 +375,7 @@ fn handle_iter(mut cmd: Seek) {
keys = %cmd.key.len(),
),
)]
fn handle_batch(mut cmd: Get) {
fn handle_batch(self: &Arc<Self>, mut cmd: Get) {
debug_assert!(cmd.key.len() > 1, "should have more than one key");
debug_assert!(!cmd.key.iter().any(SmallVec::is_empty), "querying for empty key");
@@ -401,7 +401,7 @@ fn handle_batch(mut cmd: Get) {
skip_all,
fields(%cmd.map),
)]
fn handle_get(mut cmd: Get) {
fn handle_get(&self, mut cmd: Get) {
debug_assert!(!cmd.key[0].is_empty(), "querying for empty key");
// Obtain the result channel.

View File

@@ -25,13 +25,13 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result<TokenStream>
};
let name = format!("_args_{i}");
**pat = Pat::Ident(PatIdent {
*pat = Box::new(Pat::Ident(PatIdent {
ident: Ident::new(&name, Span::call_site().into()),
attrs: Vec::new(),
by_ref: None,
mutability: None,
subpat: None,
});
}));
let field = fields.iter();
let refute = quote! {

View File

@@ -15,13 +15,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
#[ctor]
fn _set_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS);
conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
}
// static strings have to be yanked on module unload
#[dtor]
fn _unset_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name);
conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
}
};

View File

@@ -62,8 +62,7 @@ standard = [
"media_thumbnail",
"systemd",
"url_preview",
"zstd_compression",
"sentry_telemetry"
"zstd_compression"
]
full = [
"standard",
@@ -130,6 +129,7 @@ perf_measurements = [
"dep:tracing-opentelemetry",
"dep:opentelemetry_sdk",
"dep:opentelemetry-otlp",
"dep:opentelemetry-jaeger-propagator",
"conduwuit-core/perf_measurements",
"conduwuit-core/sentry_telemetry",
]
@@ -156,7 +156,6 @@ sentry_telemetry = [
]
systemd = [
"conduwuit-router/systemd",
"conduwuit-service/systemd"
]
journald = [ # This is a stub on non-unix platforms
"dep:tracing-journald",
@@ -201,7 +200,6 @@ conduwuit-core.workspace = true
conduwuit-database.workspace = true
conduwuit-router.workspace = true
conduwuit-service.workspace = true
conduwuit-build-metadata.workspace = true
clap.workspace = true
console-subscriber.optional = true
@@ -213,6 +211,8 @@ opentelemetry.optional = true
opentelemetry.workspace = true
opentelemetry-otlp.optional = true
opentelemetry-otlp.workspace = true
opentelemetry-jaeger-propagator.optional = true
opentelemetry-jaeger-propagator.workspace = true
opentelemetry_sdk.optional = true
opentelemetry_sdk.workspace = true
sentry-tower.optional = true

View File

@@ -94,7 +94,7 @@ pub(crate) fn init(
let otlp_layer = config.allow_otlp.then(|| {
opentelemetry::global::set_text_map_propagator(
opentelemetry_sdk::propagation::TraceContextPropagator::new(),
opentelemetry_jaeger_propagator::Propagator::new(),
);
let exporter = opentelemetry_otlp::SpanExporter::builder()

View File

@@ -1,12 +1,10 @@
#![cfg(feature = "sentry_telemetry")]
use std::{
borrow::Cow,
str::FromStr,
sync::{Arc, OnceLock},
};
use conduwuit_build_metadata as build;
use conduwuit_core::{config::Config, debug, trace};
use sentry::{
Breadcrumb, ClientOptions, Level,
@@ -46,7 +44,7 @@ fn options(config: &Config) -> ClientOptions {
server_name,
traces_sample_rate: config.sentry_traces_sample_rate,
debug: cfg!(debug_assertions),
release: release_name(),
release: sentry::release_name!(),
user_agent: conduwuit_core::version::user_agent().into(),
attach_stacktrace: config.sentry_attach_stacktrace,
before_send: Some(Arc::new(before_send)),
@@ -93,21 +91,3 @@ fn before_breadcrumb(crumb: Breadcrumb) -> Option<Breadcrumb> {
trace!("Sentry breadcrumb: {crumb:?}");
Some(crumb)
}
fn release_name() -> Option<Cow<'static, str>> {
static RELEASE: OnceLock<Option<String>> = OnceLock::new();
RELEASE
.get_or_init(|| {
let pkg_name = env!("CARGO_PKG_NAME");
let pkg_version = env!("CARGO_PKG_VERSION");
if let Some(commit_short) = build::GIT_COMMIT_HASH_SHORT {
Some(format!("{pkg_name}@{pkg_version}+{commit_short}"))
} else {
Some(format!("{pkg_name}@{pkg_version}"))
}
})
.as_ref()
.map(|s| Cow::Borrowed(s.as_str()))
}

View File

@@ -40,6 +40,7 @@ io_uring = [
"conduwuit-admin/io_uring",
"conduwuit-api/io_uring",
"conduwuit-service/io_uring",
"conduwuit-api/io_uring",
]
jemalloc = [
"conduwuit-admin/jemalloc",

View File

@@ -65,7 +65,7 @@ pub(crate) async fn start(server: Arc<Server>) -> Result<Arc<Services>> {
let services = Services::build(server).await?.start().await?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("failed to notify systemd of ready state");
debug!("Started");
@@ -78,7 +78,7 @@ pub(crate) async fn stop(services: Arc<Services>) -> Result<()> {
debug!("Shutting down...");
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping])
sd_notify::notify(true, &[sd_notify::NotifyState::Stopping])
.expect("failed to notify systemd of stopping state");
// Wait for all completions before dropping or we'll lose them to the module
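
The only difference between the paired notify calls above is sd-notify's first argument, unset_environment. A brief sketch of the lifecycle sequence; the comment explains the flag, and whether to clear NOTIFY_SOCKET is exactly the judgement this change is making:

#[cfg(all(feature = "systemd", target_os = "linux"))]
fn notify_lifecycle() -> std::io::Result<()> {
    // false = keep NOTIFY_SOCKET in the environment, so the later Stopping
    // notification from this same process can still reach systemd.
    sd_notify::notify(false, &[sd_notify::NotifyState::Ready])?;

    // ... serve requests ...

    sd_notify::notify(false, &[sd_notify::NotifyState::Stopping])?;
    Ok(())
}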

View File

@@ -67,9 +67,6 @@ release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
systemd = [
"dep:sd-notify",
]
url_preview = [
"dep:image",
"dep:webpage",
@@ -108,7 +105,7 @@ rustyline-async.workspace = true
rustyline-async.optional = true
serde_json.workspace = true
serde.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
sha2.workspace = true
termimad.workspace = true
termimad.optional = true
@@ -122,9 +119,5 @@ blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
ctor.workspace = true
[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
sd-notify.workspace = true
sd-notify.optional = true
[lints]
workspace = true

View File

@@ -271,7 +271,7 @@ pub async fn get_db_registration(&self, id: &str) -> Result<Registration> {
.id_appserviceregistrations
.get(id)
.await
.and_then(|ref bytes| serde_saphyr::from_slice(bytes).map_err(Into::into))
.and_then(|ref bytes| serde_yml::from_slice(bytes).map_err(Into::into))
.map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}")))
}
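
Either way, the appservice registration is stored as YAML and decoded from raw bytes. A much-reduced sketch of that path, assuming a stripped-down Registration struct (the real one comes from ruma's appservice API) and the serde_yml side of the change:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Registration {
    id: String,
    url: Option<String>,
    as_token: String,
    hs_token: String,
}

fn parse_registration(bytes: &[u8]) -> Result<Registration, serde_yml::Error> {
    // serde_saphyr::from_slice is the drop-in equivalent on the other side of
    // this change.
    serde_yml::from_slice(bytes)
}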

View File

@@ -45,16 +45,13 @@ fn deref(&self) -> &Self::Target { &self.server.config }
fn handle_reload(&self) -> Result {
if self.server.config.config_reload_signal {
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[
sd_notify::NotifyState::Reloading,
sd_notify::NotifyState::monotonic_usec_now().expect("Failed to read monotonic time"),
])
.expect("failed to notify systemd of reloading state");
sd_notify::notify(true, &[sd_notify::NotifyState::Reloading])
.expect("failed to notify systemd of reloading state");
self.reload(iter::empty())?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("failed to notify systemd of ready state");
}
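
One side of this hunk sends a bare RELOADING=1; the other pairs it with MONOTONIC_USEC, which newer systemd expects for Type=notify-reload units. A sketch of the fuller sequence, taken directly from the calls in the hunk:

#[cfg(all(feature = "systemd", target_os = "linux"))]
fn notify_reload() {
    sd_notify::notify(false, &[
        sd_notify::NotifyState::Reloading,
        sd_notify::NotifyState::monotonic_usec_now()
            .expect("Failed to read monotonic time"),
    ])
    .expect("failed to notify systemd of reloading state");

    // ... apply the new configuration ...

    sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
        .expect("failed to notify systemd of ready state");
}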

View File

@@ -11,10 +11,7 @@
use base64::{Engine as _, engine::general_purpose};
use conduwuit::{
Err, Result, Server, debug, debug_error, debug_info, debug_warn, err, error, trace,
utils::{
self, MutexMap,
time::{self, TimeDirection},
},
utils::{self, MutexMap},
warn,
};
use ruma::{Mxc, OwnedMxcUri, UserId, http_headers::ContentDisposition};
@@ -93,22 +90,17 @@ pub async fn create(
file: &[u8],
) -> Result<()> {
// Width, Height = 0 if it's not a thumbnail
let key = self
.db
.create_file_metadata(mxc, user, &Dim::default(), content_disposition, content_type)
.map_err(|e| {
err!(Database(error!("Failed to create media metadata for MXC {mxc}: {e}")))
})?;
let key = self.db.create_file_metadata(
mxc,
user,
&Dim::default(),
content_disposition,
content_type,
)?;
//TODO: Dangling metadata in database if creation fails
let mut f = self.create_media_file(&key).await.map_err(|e| {
err!(Database(error!(
"Failed to create media file for MXC {mxc} at key {key:?}: {e}"
)))
})?;
f.write_all(file).await.map_err(|e| {
err!(Database(error!("Failed to write media file for MXC {mxc} at key {key:?}: {e}")))
})?;
let mut f = self.create_media_file(&key).await?;
f.write_all(file).await?;
Ok(())
}
@@ -229,12 +221,13 @@ pub async fn get_all_mxcs(&self) -> Result<Vec<OwnedMxcUri>> {
Ok(mxcs)
}
/// Deletes all media files in the given time frame.
/// Returns a usize with the amount of media files deleted.
pub async fn delete_all_media_within_timeframe(
/// Deletes all remote-only media files at or after the given
/// time/duration. Returns a usize with the amount of media files deleted.
pub async fn delete_all_remote_media_at_after_time(
&self,
time_boundary: SystemTime,
direction: TimeDirection,
time: SystemTime,
before: bool,
after: bool,
yes_i_want_to_delete_local_media: bool,
) -> Result<usize> {
let all_keys = self.db.get_all_media_keys().await;
@@ -301,14 +294,18 @@ pub async fn delete_all_media_within_timeframe(
debug!("File created at: {file_created_at:?}");
if time::is_within_bounds(file_created_at, time_boundary, direction) {
if file_created_at >= time && before {
debug!(
"File is within bounds ({direction:?} {time_boundary:?}), pushing to list \
of file paths and keys to delete.",
"File is within (before) user duration, pushing to list of file paths and \
keys to delete."
);
remote_mxcs.push(mxc.to_string());
} else if file_created_at <= time && after {
debug!(
"File is not within (after) user duration, pushing to list of file paths \
and keys to delete."
);
remote_mxcs.push(mxc.to_string());
} else {
debug!("File is outside bounds ({direction:?} {time_boundary:?}), ignoring.");
}
}
@@ -316,7 +313,7 @@ pub async fn delete_all_media_within_timeframe(
return Err!(Database("Did not find any eligible MXCs to delete."));
}
debug_info!("Deleting media now {direction:?} {time_boundary:?}");
debug_info!("Deleting media now in the past {time:?}");
let mut deletion_count: usize = 0;

View File

@@ -9,7 +9,6 @@
},
warn,
};
use database::Json;
use futures::{FutureExt, StreamExt, TryStreamExt};
use itertools::Itertools;
use ruma::{
@@ -607,7 +606,7 @@ async fn fix_corrupt_msc4133_fields(services: &Services) -> Result {
);
};
useridprofilekey_value.put((user, key), Json(new_value));
useridprofilekey_value.put((user, key), new_value);
fixed = fixed.saturating_add(1);
}
total = total.saturating_add(1);

View File

@@ -188,7 +188,9 @@ pub fn local_aliases_for_room<'a>(
}
#[tracing::instrument(skip(self), level = "debug")]
pub fn all_local_aliases(&self) -> impl Stream<Item = (&RoomId, &str)> + Send + '_ {
pub fn all_local_aliases<'a>(
&'a self,
) -> impl Stream<Item = (&'a RoomId, &'a str)> + Send + 'a {
self.db
.alias_roomid
.stream()

View File

@@ -4,8 +4,9 @@
};
use conduwuit::{
Event, PduEvent, debug, debug_warn, implement, matrix::event::gen_event_id_canonical_json,
trace, utils::continue_exponential_backoff_secs, warn,
Event, PduEvent, debug, debug_error, debug_warn, implement,
matrix::event::gen_event_id_canonical_json, trace, utils::continue_exponential_backoff_secs,
warn,
};
use ruma::{
CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName,
@@ -51,14 +52,12 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
};
let mut events_with_auth_events = Vec::with_capacity(events.clone().count());
trace!("Fetching {} outlier pdus", events.clone().count());
for id in events {
// a. Look in the main timeline (pduid_pdu tree)
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await {
trace!("Found {id} in main timeline or outlier tree");
events_with_auth_events.push((id.to_owned(), Some(local_pdu), vec![]));
continue;
}
@@ -105,7 +104,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
continue;
}
debug!("Fetching {next_id} over federation from {origin}.");
debug!("Fetching {next_id} over federation.");
match self
.services
.sending
@@ -116,7 +115,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
.await
{
| Ok(res) => {
debug!("Got {next_id} over federation from {origin}");
debug!("Got {next_id} over federation");
let Ok(room_version_id) = get_room_version_id(create_event) else {
back_off((*next_id).to_owned());
continue;
@@ -146,9 +145,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
auth_event.clone().into(),
) {
| Ok(auth_event) => {
trace!(
"Found auth event id {auth_event} for event {next_id}"
);
todo_auth_events.push_back(auth_event);
},
| _ => {
@@ -164,7 +160,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
events_all.insert(next_id);
},
| Err(e) => {
warn!("Failed to fetch auth event {next_id} from {origin}: {e}");
debug_error!("Failed to fetch event {next_id}: {e}");
back_off((*next_id).to_owned());
},
}
@@ -179,7 +175,7 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
// b. Look at outlier pdu tree
// (get_pdu_json checks both)
if let Some(local_pdu) = local_pdu {
trace!("Found {id} in main timeline or outlier tree");
trace!("Found {id} in db");
pdus.push((local_pdu.clone(), None));
}
@@ -205,7 +201,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
}
}
trace!("Handling outlier {next_id}");
match Box::pin(self.handle_outlier_pdu(
origin,
create_event,
@@ -218,7 +213,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
{
| Ok((pdu, json)) =>
if next_id == *id {
trace!("Handled outlier {next_id} (original request)");
pdus.push((pdu, Some(json)));
},
| Err(e) => {
@@ -228,6 +222,6 @@ pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>(
}
}
}
trace!("Fetched and handled {} outlier pdus", pdus.len());
pdus
}

View File

@@ -1,12 +1,11 @@
use std::collections::{BTreeMap, HashMap, hash_map};
use conduwuit::{
Err, Event, PduEvent, Result, debug, debug_info, debug_warn, err, implement, state_res, trace,
Err, Event, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn,
};
use futures::future::ready;
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName,
events::StateEventType,
CanonicalJsonObject, CanonicalJsonValue, EventId, RoomId, ServerName, events::StateEventType,
};
use super::{check_room_id, get_room_version_id, to_room_version};
@@ -75,73 +74,36 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
check_room_id(room_id, &pdu_event)?;
// Fetch all auth events
let mut auth_events: HashMap<OwnedEventId, PduEvent> = HashMap::new();
for aid in pdu_event.auth_events() {
if let Ok(auth_event) = self.services.timeline.get_pdu(aid).await {
check_room_id(room_id, &auth_event)?;
trace!("Found auth event {aid} for outlier event {event_id} locally");
auth_events.insert(aid.to_owned(), auth_event);
} else {
debug_warn!("Could not find auth event {aid} for outlier event {event_id} locally");
}
}
// Fetch any missing ones & reject invalid ones
let missing_auth_events = if auth_events_known {
pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>()
} else {
pdu_event.auth_events().collect::<Vec<_>>()
};
if !missing_auth_events.is_empty() || !auth_events_known {
debug_info!(
"Fetching {} missing auth events for outlier event {event_id}",
missing_auth_events.len()
);
for (pdu, _) in self
.fetch_and_handle_outliers(
origin,
missing_auth_events.iter().copied(),
create_event,
room_id,
)
.await
{
auth_events.insert(pdu.event_id().to_owned(), pdu);
}
} else {
debug!("No missing auth events for outlier event {event_id}");
}
// reject if we are still missing some
let still_missing = pdu_event
.auth_events()
.filter(|id| !auth_events.contains_key(*id))
.collect::<Vec<_>>();
if !still_missing.is_empty() {
return Err!(Request(InvalidParam(
"Could not fetch all auth events for outlier event {event_id}, still missing: \
{still_missing:?}"
)));
if !auth_events_known {
// 4. fetch any missing auth events doing all checks listed here starting at 1.
// These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of
// the auth events are also rejected "due to auth events"
// NOTE: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events");
Box::pin(self.fetch_and_handle_outliers(
origin,
pdu_event.auth_events(),
create_event,
room_id,
))
.await;
}
// 6. Reject "due to auth events" if the event doesn't pass auth based on the
// auth events
debug!("Checking based on auth events");
let mut auth_events_by_key: HashMap<_, _> = HashMap::with_capacity(auth_events.len());
// Build map of auth events
let mut auth_events = HashMap::with_capacity(pdu_event.auth_events().count());
for id in pdu_event.auth_events() {
let auth_event = auth_events
.get(id)
.expect("we just checked that we have all auth events")
.to_owned();
let Ok(auth_event) = self.services.timeline.get_pdu(id).await else {
warn!("Could not find auth event {id}");
continue;
};
check_room_id(room_id, &auth_event)?;
match auth_events_by_key.entry((
match auth_events.entry((
auth_event.kind.to_string().into(),
auth_event
.state_key
@@ -161,7 +123,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
// The original create event must be in the auth events
if !matches!(
auth_events_by_key.get(&(StateEventType::RoomCreate, String::new().into())),
auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
Some(_) | None
) {
return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
@@ -169,7 +131,7 @@ pub(super) async fn handle_outlier_pdu<'a, Pdu>(
let state_fetch = |ty: &StateEventType, sk: &str| {
let key = (ty.to_owned(), sk.into());
ready(auth_events_by_key.get(&key).map(ToOwned::to_owned))
ready(auth_events.get(&key).map(ToOwned::to_owned))
};
let auth_check = state_res::event_auth::auth_check(
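
Both versions above end up with the auth events keyed by (event type, state key) so the state_fetch closure handed to auth_check can look them up. A reduced sketch of that map construction; PduEvent here is a stand-in struct, not the real conduwuit type:

use std::collections::HashMap;

struct PduEvent {
    kind: String,
    state_key: Option<String>,
}

fn auth_events_by_key(events: Vec<PduEvent>) -> HashMap<(String, String), PduEvent> {
    let mut map = HashMap::with_capacity(events.len());
    for event in events {
        // Only state events carry a state key; skip anything else.
        let Some(state_key) = event.state_key.clone() else { continue };
        map.entry((event.kind.clone(), state_key)).or_insert(event);
    }
    map
}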

View File

@@ -5,9 +5,9 @@
use std::time::Duration;
use conduwuit::{Err, Event, PduEvent, Result, debug, debug_info, implement, trace, warn};
use conduwuit::{Err, Event, PduEvent, Result, debug, implement, warn};
use ruma::{
CanonicalJsonObject, RoomId, ServerName,
RoomId, ServerName,
api::federation::room::policy::v1::Request as PolicyRequest,
events::{StateEventType, room::policy::RoomPolicyEventContent},
};
@@ -25,25 +25,7 @@
/// fail-open operation.
#[implement(super::Service)]
#[tracing::instrument(skip_all, level = "debug")]
pub async fn ask_policy_server(
&self,
pdu: &PduEvent,
pdu_json: &CanonicalJsonObject,
room_id: &RoomId,
) -> Result<bool> {
if !self.services.server.config.enable_msc4284_policy_servers {
return Ok(true); // don't ever contact policy servers
}
if self.services.server.config.policy_server_check_own_events
&& pdu.origin.is_some()
&& self
.services
.server
.is_ours(pdu.origin.as_ref().unwrap().as_str())
{
return Ok(true); // don't contact policy servers for locally generated events
}
pub async fn ask_policy_server(&self, pdu: &PduEvent, room_id: &RoomId) -> Result<bool> {
if *pdu.event_type() == StateEventType::RoomPolicy.into() {
debug!(
room_id = %room_id,
@@ -65,12 +47,12 @@ pub async fn ask_policy_server(
let via = match policyserver.via {
| Some(ref via) => ServerName::parse(via)?,
| None => {
trace!("No policy server configured for room {room_id}");
debug!("No policy server configured for room {room_id}");
return Ok(true);
},
};
if via.is_empty() {
trace!("Policy server is empty for room {room_id}, skipping spam check");
debug!("Policy server is empty for room {room_id}, skipping spam check");
return Ok(true);
}
if !self.services.state_cache.server_in_room(via, room_id).await {
@@ -84,12 +66,12 @@ pub async fn ask_policy_server(
let outgoing = self
.services
.sending
.convert_to_outgoing_federation_event(pdu_json.clone())
.convert_to_outgoing_federation_event(pdu.to_canonical_object())
.await;
debug_info!(
debug!(
room_id = %room_id,
via = %via,
outgoing = ?pdu_json,
outgoing = ?outgoing,
"Checking event for spam with policy server"
);
let response = tokio::time::timeout(
@@ -103,10 +85,7 @@ pub async fn ask_policy_server(
)
.await;
let response = match response {
| Ok(Ok(response)) => {
debug!("Response from policy server: {:?}", response);
response
},
| Ok(Ok(response)) => response,
| Ok(Err(e)) => {
warn!(
via = %via,
@@ -118,18 +97,16 @@ pub async fn ask_policy_server(
// default.
return Err(e);
},
| Err(elapsed) => {
| Err(_) => {
warn!(
%via,
via = %via,
event_id = %pdu.event_id(),
%room_id,
%elapsed,
room_id = %room_id,
"Policy server request timed out after 10 seconds"
);
return Err!("Request to policy server timed out");
},
};
trace!("Recommendation from policy server was {}", response.recommendation);
if response.recommendation == "spam" {
warn!(
via = %via,
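
The policy-server request above is raced against a timeout; one variant logs the elapsed duration, the other only warns. A bare-bones sketch of that pattern, where query_policy_server is a made-up stand-in for the federation request issued through the sending service:

use std::time::Duration;

async fn query_policy_server() -> Result<String, std::io::Error> {
    // placeholder: the real call goes over federation and returns a recommendation
    Ok("ok".to_owned())
}

async fn recommendation_or_timeout() -> Result<String, String> {
    match tokio::time::timeout(Duration::from_secs(10), query_policy_server()).await {
        | Ok(Ok(response)) => Ok(response),
        | Ok(Err(e)) => Err(format!("policy server request failed: {e}")),
        | Err(elapsed) => Err(format!("policy server request timed out: {elapsed}")),
    }
}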

View File

@@ -255,10 +255,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
// 14-pre. If the event is not a state event, ask the policy server about it
if incoming_pdu.state_key.is_none() {
debug!(event_id = %incoming_pdu.event_id, "Checking policy server for event");
match self
.ask_policy_server(&incoming_pdu, &incoming_pdu.to_canonical_object(), room_id)
.await
{
match self.ask_policy_server(&incoming_pdu, room_id).await {
| Ok(false) => {
warn!(
event_id = %incoming_pdu.event_id,

View File

@@ -1,10 +1,9 @@
use conduwuit::{Err, Result, RoomVersion, implement, matrix::Event, pdu::PduBuilder};
use conduwuit::{Err, Result, implement, matrix::Event, pdu::PduBuilder};
use ruma::{
EventId, RoomId, UserId,
events::{
StateEventType, TimelineEventType,
room::{
create::RoomCreateEventContent,
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
member::{MembershipState, RoomMemberEventContent},
power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent},
@@ -45,23 +44,6 @@ pub async fn user_can_redact(
)));
}
let room_create = self
.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await?;
let create_content: RoomCreateEventContent =
serde_json::from_str(room_create.content().get())?;
let room_features = RoomVersion::new(&create_content.room_version)?;
if room_features.explicitly_privilege_room_creators {
let sender_owned = sender.to_owned();
if sender == room_create.sender()
|| create_content
.additional_creators
.is_some_and(|cs| cs.contains(&sender_owned))
{
return Ok(true);
}
}
match self
.room_state_get_content::<RoomPowerLevelsEventContent>(
room_id,
@@ -86,10 +68,18 @@ pub async fn user_can_redact(
},
| _ => {
// Falling back on m.room.create to judge power level
Ok(room_create.sender() == sender
|| redacting_event
.as_ref()
.is_ok_and(|redacting_event| redacting_event.sender() == sender))
match self
.room_state_get(room_id, &StateEventType::RoomCreate, "")
.await
{
| Ok(room_create) => Ok(room_create.sender() == sender
|| redacting_event
.as_ref()
.is_ok_and(|redacting_event| redacting_event.sender() == sender)),
| _ => Err!(Database(
"No m.room.power_levels or m.room.create events in database for room"
)),
}
},
}
}
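
The block added/removed above is the creator-privilege rule: in room versions that explicitly privilege room creators, the create event's sender and anyone listed in additional_creators may always redact. A simplified, type-free sketch of that predicate (field names come from the hunk, everything else is illustrative):

fn creator_may_redact(
    explicitly_privilege_room_creators: bool,
    create_sender: &str,
    additional_creators: Option<&[String]>,
    sender: &str,
) -> bool {
    explicitly_privilege_room_creators
        && (sender == create_sender
            || additional_creators.is_some_and(|cs| cs.iter().any(|c| c.as_str() == sender)))
}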

View File

@@ -9,7 +9,6 @@
state_res::{self, RoomVersion},
},
utils::{self, IterStream, ReadyExt, stream::TryIgnore},
warn,
};
use futures::{StreamExt, TryStreamExt, future, future::ready};
use ruma::{
@@ -20,6 +19,7 @@
uint,
};
use serde_json::value::{RawValue, to_raw_value};
use tracing::warn;
use super::RoomMutexGuard;
@@ -267,19 +267,23 @@ fn from_evt(
| _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))),
};
}
// Generate event id
pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?;
// Check with the policy server
pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into()));
// Check with the policy server
if room_id.is_some() {
trace!(
"Checking event in room {} with policy server",
"Checking event {} in room {} with policy server",
pdu.event_id,
pdu.room_id.as_ref().map_or("None", |id| id.as_str())
);
match self
.services
.event_handler
.ask_policy_server(&pdu, &pdu_json, pdu.room_id().expect("has room ID"))
.ask_policy_server(&pdu, &pdu.room_id_or_hash())
.await
{
| Ok(true) => {},

View File

@@ -345,7 +345,7 @@ pub async fn displayname(&self, user_id: &UserId) -> Result<String> {
}
/// Sets a new displayname or removes it if displayname is None. You still
/// need to notify all rooms of this change.
/// need to nofify all rooms of this change.
pub fn set_displayname(&self, user_id: &UserId, displayname: Option<String>) {
if let Some(displayname) = displayname {
self.db.userid_displayname.insert(user_id, displayname);

View File

@@ -1,6 +0,0 @@
[formatting]
align_entries = true # Align entries vertically. Entries that have table headers, comments, or blank lines between them are not aligned.
reorder_arrays = true # Alphabetically reorder array values that are not separated by blank lines.
reorder_inline_tables = true # Alphabetically reorder inline tables.
reorder_keys = true # Alphabetically reorder keys that are not separated by blank lines.