Compare commits

..

27 Commits

Author SHA1 Message Date

nexy7574  446db274a3  fix(hydra): Always append the current extremity to leaves  2025-09-17 21:32:46 +01:00
nexy7574  6840ec45f7  fix(hydra): Use an enum instead of a float for stateres version  2025-09-17 21:32:46 +01:00
nexy7574  c4a2773230  fix(hydra): Fix rocksdb compile errors  2025-09-17 21:32:46 +01:00
nexy7574  502fbbf0cd  chore(hydra): Bump ruwuma.. again  2025-09-17 21:32:46 +01:00
nexy7574  19bd8a3c05  chore(hydra): Bump ruwuma *correctly*  2025-09-17 21:32:46 +01:00
                      # Conflicts:
                      #   Cargo.lock
                      #   Cargo.toml
nexy7574  8ae73d455f  chore(hydra): Bump ruwuma  2025-09-17 21:32:46 +01:00
nexy7574  ccb112ef05  fix(hydra): Fix unknown join rule processing, again  2025-09-17 21:32:46 +01:00
nexy7574  b00f6ffbed  fix(hydra): Fix unknown join rule processing  2025-09-17 21:32:46 +01:00
nexy7574  2e252f0841  fix(hydra): Don't use 2.1 in v6 room in test_event_sort  2025-09-17 21:32:46 +01:00
nexy7574  936f0a669b  fix(hydra): Fix state resolution v2.0 bug  2025-09-17 21:32:46 +01:00
nexy7574  35b7b45ea0  fix(hydra): Fix v11 room support  2025-09-17 21:32:46 +01:00
nexy7574  ff92573103  fix(hydra): Correctly create short state hash for <v12 rooms  2025-09-17 21:32:46 +01:00
nexy7574  4ed19a1630  fix(hydra): Own it  2025-09-17 21:32:46 +01:00
nexy7574  a35f009d41  fix(hydra): Idk maybe this will fix it  2025-09-17 21:32:46 +01:00
nexy7574  540cd28d44  fix(hydra): Incorrect "auth event for incorrect room"  2025-09-17 21:32:46 +01:00
nexy7574  344e1e7d76  fix(hydra): Restricted join -> join is allowed  2025-09-17 21:32:46 +01:00
nexy7574  4446e96889  feat(hydra): Bump database version  2025-09-17 21:32:46 +01:00
nexy7574  edb92f021b  fix(hydra): Tackle most open code review comments  2025-09-17 21:32:46 +01:00
nexy7574  40ebe37992  fix(hydra): Stop enforcing unfederated v12 rooms  2025-09-17 21:32:46 +01:00
nexy7574  a27659c73f  fix(hydra): Don't serialise null room IDs in PDUs  2025-09-17 21:32:46 +01:00
nexy7574  fa460fe97c  style: Post-rebase formatting  2025-09-17 21:32:46 +01:00
nexy7574  c2620ba57b  style(hydra): Satisfy clippy's twisted and confusing demands  2025-09-17 21:32:46 +01:00
nexy7574  4024349424  fix(hydra): Backfill server selection in v12  2025-09-17 21:32:46 +01:00
nexy7574  240088c1f5  fix(hydra): Unable to parse backfilled incoming create events  2025-09-17 21:32:46 +01:00
nexy7574  91229ac3bf  fix(hydra): Working? State res v2.1  2025-09-17 21:32:46 +01:00
nexy7574  854e5f7199  style: Reformat and whatnot  2025-09-17 21:32:46 +01:00
nexy7574  96a58f6d69  feat(hydra): Initial public commit for v12 support  2025-09-17 21:32:46 +01:00
                      # Conflicts:
                      #   src/core/info/room_version.rs
                      #   src/service/rooms/timeline/create.rs

                      # Conflicts:
                      #   Cargo.lock
                      #   src/core/matrix/state_res/event_auth.rs

                      # Conflicts:
                      #   Cargo.toml
116 changed files with 2899 additions and 4093 deletions

View File

@@ -61,16 +61,14 @@ runs:
id: meta
uses: docker/metadata-action@v5
with:
flavor: |
suffix=${{ inputs.tag_suffix }},onlatest=true
tags: |
type=semver,pattern={{version}},prefix=v
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},
type=ref,event=pr
type=sha,format=short
type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }},priority=1100
type=semver,pattern={{version}},prefix=v,suffix=${{ inputs.tag_suffix }}
type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v,suffix=${{ inputs.tag_suffix }}
type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v,suffix=${{ inputs.tag_suffix }}
type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},suffix=${{ inputs.tag_suffix }}
type=ref,event=pr,suffix=${{ inputs.tag_suffix }}
type=sha,format=short,suffix=${{ inputs.tag_suffix }}
type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }}
images: ${{ inputs.images }}
# default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
env:
@@ -83,7 +81,6 @@ runs:
env:
IMAGES: ${{ inputs.images }}
run: |
set -o xtrace
IFS=$'\n'
IMAGES_LIST=($IMAGES)
ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
@@ -101,7 +98,6 @@ runs:
env:
IMAGES: ${{ inputs.images }}
run: |
set -o xtrace
IMAGES_LIST=($IMAGES)
for REPO in "${IMAGES_LIST[@]}"; do
docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
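
For context, the `tags:` templates above are docker/metadata-action expressions; with invented inputs (a push of tag `v1.2.3` and `tag_suffix: -alpine`, not taken from a real run) they would expand to roughly:

```yaml
# Illustrative expansion only; the actual output depends on the ref and
# on docker/metadata-action's evaluation of the enable= conditions.
tags:
  - v1.2.3-alpine      # type=semver,pattern={{version}},prefix=v,suffix=-alpine
  - v1.2-alpine        # type=semver,pattern={{major}}.{{minor}} (enabled: tag is not v0.0.*)
  - v1-alpine          # type=semver,pattern={{major}} (enabled: tag is not v0.*)
  - sha-abc1234-alpine # type=sha,format=short,suffix=-alpine
  - latest-alpine      # type=raw,value=latest-alpine (enabled: ref is a v* tag)
```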

View File

@@ -0,0 +1,58 @@
name: detect-runner-os
description: |
Detect the actual OS name and version of the runner.
Provides separate outputs for name, version, and a combined slug.
outputs:
name:
description: 'OS name (e.g. Ubuntu, Debian)'
value: ${{ steps.detect.outputs.name }}
version:
description: 'OS version (e.g. 22.04, 11)'
value: ${{ steps.detect.outputs.version }}
slug:
description: 'Combined OS slug (e.g. Ubuntu-22.04)'
value: ${{ steps.detect.outputs.slug }}
node_major:
description: 'Major version of Node.js if available (e.g. 22)'
value: ${{ steps.detect.outputs.node_major }}
node_version:
description: 'Full Node.js version if available (e.g. 22.19.0)'
value: ${{ steps.detect.outputs.node_version }}
runs:
using: composite
steps:
- name: Detect runner OS
id: detect
shell: bash
run: |
# Detect OS version (try lsb_release first, fall back to /etc/os-release)
OS_VERSION=$(lsb_release -rs 2>/dev/null || grep VERSION_ID /etc/os-release | cut -d'"' -f2)
# Detect OS name and capitalise (try lsb_release first, fall back to /etc/os-release)
OS_NAME=$(lsb_release -is 2>/dev/null || grep "^ID=" /etc/os-release | cut -d'=' -f2 | tr -d '"' | sed 's/\b\(.\)/\u\1/g')
# Create combined slug
OS_SLUG="${OS_NAME}-${OS_VERSION}"
# Detect Node.js version if available
if command -v node >/dev/null 2>&1; then
NODE_VERSION=$(node --version | sed 's/v//')
NODE_MAJOR=$(echo $NODE_VERSION | cut -d. -f1)
echo "node_version=${NODE_VERSION}" >> $GITHUB_OUTPUT
echo "node_major=${NODE_MAJOR}" >> $GITHUB_OUTPUT
echo "🔍 Detected Node.js: v${NODE_VERSION}"
else
echo "node_version=" >> $GITHUB_OUTPUT
echo "node_major=" >> $GITHUB_OUTPUT
echo "🔍 Node.js not found"
fi
# Set OS outputs
echo "name=${OS_NAME}" >> $GITHUB_OUTPUT
echo "version=${OS_VERSION}" >> $GITHUB_OUTPUT
echo "slug=${OS_SLUG}" >> $GITHUB_OUTPUT
# Log detection results
echo "🔍 Detected Runner OS: ${OS_NAME} ${OS_VERSION}"

View File

@@ -121,7 +121,7 @@ runs:
.cargo/git/checkouts
.cargo/registry
.cargo/registry/src
key: continuwuity-rust-registry-image-${{hashFiles('**/Cargo.lock') }}
key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
- name: Cache cargo target
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
@@ -130,7 +130,7 @@ runs:
with:
path: |
cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}
key: continuwuity-cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
key: cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
- name: Cache apt cache
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
@@ -139,7 +139,7 @@ runs:
with:
path: |
var-cache-apt-${{ inputs.slug }}
key: continuwuity-var-cache-apt-${{ inputs.slug }}
key: var-cache-apt-${{ inputs.slug }}
- name: Cache apt lib
if: ${{ env.BUILDKIT_ENDPOINT == '' }}
@@ -148,7 +148,7 @@ runs:
with:
path: |
var-lib-apt-${{ inputs.slug }}
key: continuwuity-var-lib-apt-${{ inputs.slug }}
key: var-lib-apt-${{ inputs.slug }}
- name: inject cache into docker
if: ${{ env.BUILDKIT_ENDPOINT == '' }}

View File

@@ -40,7 +40,7 @@ runs:
!~/.rustup/tmp
!~/.rustup/downloads
# Requires repo to be cloned if toolchain is not specified
key: continuwuity-${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
- name: Install Rust toolchain
if: steps.rustup-version.outputs.version == ''
shell: bash

View File

@@ -29,7 +29,7 @@ runs:
steps:
- name: Detect runner OS
id: runner-os
uses: https://git.tomfos.tr/actions/detect-versions@v1
uses: ./.forgejo/actions/detect-runner-os
- name: Configure cross-compilation architecture
if: inputs.dpkg-arch != ''
@@ -69,7 +69,7 @@ runs:
/usr/lib/x86_64-linux-gnu/libclang*.so*
/etc/apt/sources.list.d/archive_uri-*
/etc/apt/trusted.gpg.d/apt.llvm.org.asc
key: continuwuity-llvm-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-v${{ inputs.llvm-version }}-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
key: llvm-${{ steps.runner-os.outputs.slug }}-v${{ inputs.llvm-version }}-v3-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}
- name: End LLVM cache group
shell: bash

View File

@@ -17,9 +17,9 @@ inputs:
required: false
default: ''
rust-version:
description: 'Rust version to install (e.g. nightly). Defaults to the version specified in rust-toolchain.toml'
description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
required: false
default: ''
default: '1.87.0'
sccache-cache-limit:
description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
required: false
@@ -39,7 +39,7 @@ runs:
steps:
- name: Detect runner OS
id: runner-os
uses: https://git.tomfos.tr/actions/detect-versions@v1
uses: ./.forgejo/actions/detect-runner-os
- name: Configure Cargo environment
shell: bash
@@ -59,20 +59,9 @@ runs:
mkdir -p "${{ github.workspace }}/target"
mkdir -p "${{ github.workspace }}/.rustup"
- name: Start registry/toolchain restore group
- name: Start cache restore group
shell: bash
run: echo "::group::📦 Restoring registry and toolchain caches"
- name: Cache toolchain binaries
id: toolchain-cache
uses: actions/cache@v4
with:
path: |
.cargo/bin
.rustup/toolchains
.rustup/update-hashes
# Shared toolchain cache across all Rust versions
key: continuwuity-toolchain-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}
run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"
- name: Cache Cargo registry and git
id: registry-cache
@@ -84,17 +73,49 @@ runs:
.cargo/git/db
# Registry cache saved per workflow, restored from any workflow's cache
# Each workflow maintains its own registry that accumulates its needed crates
key: continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ github.workflow }}
key: cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ github.workflow }}
restore-keys: |
continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-
cargo-registry-${{ steps.runner-os.outputs.slug }}-
- name: End registry/toolchain restore group
- name: Cache toolchain binaries
id: toolchain-cache
uses: actions/cache@v4
with:
path: |
.cargo/bin
.rustup/toolchains
.rustup/update-hashes
# Shared toolchain cache across all Rust versions
key: toolchain-${{ steps.runner-os.outputs.slug }}
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Cache build artifacts
id: build-cache
uses: actions/cache@v4
with:
path: |
target/**/deps
!target/**/deps/*.rlib
target/**/build
target/**/.fingerprint
target/**/incremental
target/**/*.d
/timelord/
# Build artifacts - cache per code change, restore from deps when code changes
key: >-
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
restore-keys: |
build-${{ steps.runner-os.outputs.slug }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
- name: End cache restore group
shell: bash
run: echo "::endgroup::"
- name: Setup Rust toolchain
shell: bash
id: rust-setup
run: |
# Install rustup if not already cached
if ! command -v rustup &> /dev/null; then
@@ -122,68 +143,8 @@ runs:
echo "::group::📦 Setting up Rust from rust-toolchain.toml"
rustup show
fi
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
echo "::endgroup::"
- name: Install Rust components
if: inputs.rust-components != ''
shell: bash
run: |
echo "📦 Installing components: ${{ inputs.rust-components }}"
rustup component add ${{ inputs.rust-components }}
- name: Install Rust target
if: inputs.rust-target != ''
shell: bash
run: |
echo "📦 Installing target: ${{ inputs.rust-target }}"
rustup target add ${{ inputs.rust-target }}
- name: Start build cache restore group
shell: bash
run: echo "::group::📦 Restoring build cache"
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Cache dependencies
id: deps-cache
uses: actions/cache@v4
with:
path: |
target/**/.fingerprint
target/**/deps
target/**/*.d
target/**/.cargo-lock
target/**/CACHEDIR.TAG
target/**/.rustc_info.json
/timelord/
# Dependencies cache - based on Cargo.lock, survives source code changes
key: >-
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
restore-keys: |
continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
- name: Cache incremental compilation
id: incremental-cache
uses: actions/cache@v4
with:
path: |
target/**/incremental
# Incremental cache - based on source code changes
key: >-
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
restore-keys: |
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ steps.rust-setup.outputs.version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-
- name: End build cache restore group
shell: bash
run: echo "::endgroup::"
- name: Configure PATH and install tools
shell: bash
env:
@@ -237,9 +198,27 @@ runs:
echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
fi
- name: Output version and summary
- name: Install Rust components
if: inputs.rust-components != ''
shell: bash
run: |
echo "📦 Installing components: ${{ inputs.rust-components }}"
rustup component add ${{ inputs.rust-components }}
- name: Install Rust target
if: inputs.rust-target != ''
shell: bash
run: |
echo "📦 Installing target: ${{ inputs.rust-target }}"
rustup target add ${{ inputs.rust-target }}
- name: Output version and summary
id: rust-setup
shell: bash
run: |
RUST_VERSION=$(rustc --version | cut -d' ' -f2)
echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT
echo "📋 Setup complete:"
echo " Rust: $(rustc --version)"
echo " Cargo: $(cargo --version)"

View File

@@ -36,7 +36,7 @@ runs:
path: |
/usr/share/rust/.cargo/bin
~/.cargo/bin
key: continuwuity-timelord-binaries
key: timelord-binaries-v3
- name: Check if binaries need installation
shell: bash
@@ -82,7 +82,7 @@ runs:
path: |
/usr/share/rust/.cargo/bin
~/.cargo/bin
key: continuwuity-timelord-binaries
key: timelord-binaries-v3
- name: Restore timelord cache with fallbacks
@@ -92,7 +92,7 @@ runs:
path: ${{ env.TIMELORD_CACHE_PATH }}
key: ${{ env.TIMELORD_KEY }}
restore-keys: |
continuwuity-timelord-${{ github.repository }}-
timelord-v1-${{ github.repository }}-
- name: Initialize timestamps on complete cache miss
if: steps.timelord-restore.outputs.cache-hit != 'true'

View File

@@ -40,15 +40,6 @@ creds:
- registry: registry.gitlab.com
user: "{{env \"GITLAB_USERNAME\"}}"
pass: "{{env \"GITLAB_TOKEN\"}}"
- registry: git.nexy7574.co.uk
user: "{{env \"N7574_GIT_USERNAME\"}}"
pass: "{{env \"N7574_GIT_TOKEN\"}}"
- registry: ghcr.io
user: "{{env \"GH_PACKAGES_USER\"}}"
pass: "{{env \"GH_PACKAGES_TOKEN\"}}"
- registry: docker.io
user: "{{env \"DOCKER_MIRROR_USER\"}}"
pass: "{{env \"DOCKER_MIRROR_TOKEN\"}}"
# Global defaults
defaults:
@@ -62,15 +53,3 @@ sync:
target: registry.gitlab.com/continuwuity/continuwuity
type: repository
<<: *tags-main
- source: *source
target: git.nexy7574.co.uk/mirrored/continuwuity
type: repository
<<: *tags-releases
- source: *source
target: ghcr.io/continuwuity/continuwuity
type: repository
<<: *tags-main
- source: *source
target: docker.io/jadedblueeyes/continuwuity
type: repository
<<: *tags-main
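
For context, the `<<: *tags-main` lines above are YAML merge keys pulling shared tag filters into each sync entry; one entry expands roughly as below. The anchor contents are hypothetical, since `&source` and `&tags-main` are defined outside this diff:

```yaml
# Hypothetical expansion of a single sync entry; the real anchor values
# are not shown in this compare.
sync:
  - source: forgejo.ellis.link/continuwuation/continuwuity
    target: registry.gitlab.com/continuwuity/continuwuity
    type: repository
    tags:
      allow:
        - "latest"
        - "v.*"
```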

View File

@@ -1,148 +0,0 @@
name: Build / Debian DEB
concurrency:
group: "build-debian-${{ forge.ref }}"
cancel-in-progress: true
on:
push:
tags:
- "v*.*.*"
workflow_dispatch:
schedule:
- cron: '30 0 * * *'
jobs:
build:
runs-on: ubuntu-latest
strategy:
matrix:
container: ["ubuntu-latest", "ubuntu-previous", "debian-latest", "debian-oldstable"]
container:
image: "ghcr.io/tcpipuk/act-runner:${{ matrix.container }}"
steps:
- name: Get Debian version
id: debian-version
run: |
VERSION=$(cat /etc/debian_version)
DISTRIBUTION=$(lsb_release -sc 2>/dev/null)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
echo "Debian distribution: $DISTRIBUTION ($VERSION)"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
with:
fetch-depth: 0
- name: Cache Cargo registry
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
key: cargo-debian-${{ steps.debian-version.outputs.distribution }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
cargo-debian-${{ steps.debian-version.outputs.distribution }}-
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Configure sccache environment
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Setup Rust nightly
uses: ./.forgejo/actions/setup-rust
with:
rust-version: nightly
github-token: ${{ secrets.GH_PUBLIC_RO }}
- name: Get package version and component
id: package-meta
run: |
BASE_VERSION=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.name == \"conduwuit\").version" | sed 's/[^a-zA-Z0-9.+]/~/g')
# VERSION is the package version, COMPONENT is used in
# apt's repository config like a git repo branch
if [[ "${{ forge.ref }}" == "refs/tags/"* ]]; then
# Use the "stable" component for tagged releases
COMPONENT="stable"
VERSION=$BASE_VERSION
else
# Use the "dev" component for development builds
SHA=$(echo "${{ forge.sha }}" | cut -c1-7)
DATE=$(date +%Y%m%d)
if [ "${{ forge.ref_name }}" = "main" ]; then
COMPONENT="dev"
else
# Use the sanitized ref name as the component for feature branches
COMPONENT="dev-$(echo '${{ forge.ref_name }}' | sed 's/[^a-zA-Z0-9.+]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)"
fi
CLEAN_COMPONENT=$(echo $COMPONENT | sed 's/[^a-zA-Z0-9.+]/~/g')
VERSION="$BASE_VERSION~git$DATE.$SHA-$CLEAN_COMPONENT"
fi
echo "component=$COMPONENT" >> $GITHUB_OUTPUT
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Component: $COMPONENT"
echo "Version: $VERSION"
- name: Install cargo-deb
run: |
if command -v cargo-deb &> /dev/null; then
echo "cargo-deb already available"
else
echo "Installing cargo-deb"
cargo-binstall -y --no-symlinks cargo-deb
fi
- name: Install build dependencies
run: |
apt-get update -y
# Build dependencies for rocksdb
apt-get install -y clang liburing-dev
- name: Run cargo-deb
id: cargo-deb
run: |
DEB_PATH=$(cargo deb --deb-version ${{ steps.package-meta.outputs.version }})
echo "path=$DEB_PATH" >> $GITHUB_OUTPUT
- name: Test deb installation
run: |
echo "Installing: ${{ steps.cargo-deb.outputs.path }}"
apt-get install -y ${{ steps.cargo-deb.outputs.path }}
dpkg -s continuwuity
[ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
[ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
- name: Upload deb artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity-${{ steps.debian-version.outputs.distribution }}
path: ${{ steps.cargo-deb.outputs.path }}
- name: Publish to Forgejo package registry
if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' || forge.event_name == 'schedule' }}
run: |
OWNER="continuwuation"
DISTRIBUTION=${{ steps.debian-version.outputs.distribution }}
COMPONENT=${{ steps.package-meta.outputs.component }}
DEB=${{ steps.cargo-deb.outputs.path }}
echo "Publishing: $DEB in component $COMPONENT for distribution $DISTRIBUTION"
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
--upload-file "$DEB" \
"${{ forge.server_url }}/api/packages/$OWNER/debian/pool/$DISTRIBUTION/$COMPONENT/upload"

View File

@@ -1,389 +0,0 @@
name: Build / Fedora RPM
concurrency:
group: "build-fedora-${{ github.ref }}"
cancel-in-progress: true
on:
push:
tags:
- "v*.*.*"
# paths:
# - 'pkg/fedora/**'
# - 'src/**'
# - 'Cargo.toml'
# - 'Cargo.lock'
# - '.forgejo/workflows/build-fedora.yml'
workflow_dispatch:
schedule:
- cron: '30 0 * * *'
jobs:
build:
runs-on: fedora-latest
steps:
- name: Detect Fedora version
id: fedora
run: |
VERSION=$(rpm -E %fedora)
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "Fedora version: $VERSION"
- name: Checkout repository with full history
uses: https://code.forgejo.org/actions/checkout@v5
with:
fetch-depth: 0
- name: Cache DNF packages
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
/var/cache/dnf
/var/cache/yum
key: dnf-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('pkg/fedora/continuwuity.spec.rpkg') }}-v1
restore-keys: |
dnf-fedora${{ steps.fedora.outputs.version }}-
- name: Cache Cargo registry
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/.cargo/registry
~/.cargo/git
key: cargo-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
cargo-fedora${{ steps.fedora.outputs.version }}-
- name: Cache Rust build dependencies
uses: https://code.forgejo.org/actions/cache@v4
with:
path: |
~/rpmbuild/BUILD/*/target/release/deps
~/rpmbuild/BUILD/*/target/release/build
~/rpmbuild/BUILD/*/target/release/.fingerprint
~/rpmbuild/BUILD/*/target/release/incremental
key: rust-deps-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
rust-deps-fedora${{ steps.fedora.outputs.version }}-
- name: Setup sccache
uses: https://git.tomfos.tr/tom/sccache-action@v1
- name: Configure sccache environment
run: |
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
# Aggressive GC since cache restores don't increment counter
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
- name: Install base RPM tools
run: |
dnf install -y --setopt=keepcache=1 \
fedora-packager \
python3-pip \
rpm-sign \
rpkg \
wget
- name: Setup build environment and build SRPM
run: |
git config --global --add safe.directory "$GITHUB_WORKSPACE"
git config --global user.email "ci@continuwuity.org"
git config --global user.name "Continuwuity"
rpmdev-setuptree
cd "$GITHUB_WORKSPACE"
# Determine release suffix and version based on ref type and branch
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
# Tags get clean version numbers for stable releases
RELEASE_SUFFIX=""
TAG_NAME="${{ github.ref_name }}"
# Extract version from tag (remove v prefix if present)
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
# Create spec file with tag version
sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
-e "s/^Release:.*$/Release: 1%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
elif [ "${{ github.ref_name }}" = "main" ]; then
# Main branch gets .dev suffix
RELEASE_SUFFIX=".dev"
# Replace the Release line to include our suffix
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
else
# Other branches get sanitized branch name as suffix
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/_/g' | cut -c1-20)
RELEASE_SUFFIX=".${SAFE_BRANCH}"
# Replace the Release line to include our suffix
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
fi
rpkg srpm --outdir "$HOME/rpmbuild/SRPMS"
ls -la $HOME/rpmbuild/SRPMS/
- name: Install build dependencies from SRPM
run: |
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
if [ -z "$SRPM" ]; then
echo "Error: No SRPM file found"
exit 1
fi
echo "Installing build dependencies from: $(basename $SRPM)"
dnf builddep -y "$SRPM"
- name: Build RPM from SRPM
run: |
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
if [ -z "$SRPM" ]; then
echo "Error: No SRPM file found"
exit 1
fi
echo "Building from SRPM: $SRPM"
rpmbuild --rebuild "$SRPM" \
--define "_topdir $HOME/rpmbuild" \
--define "_sourcedir $GITHUB_WORKSPACE" \
--nocheck # Skip %check section to avoid test dependencies
- name: Test RPM installation
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find "$HOME/rpmbuild/RPMS" -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" | head -1)
if [ -z "$RPM" ]; then
echo "Error: No binary RPM file found"
exit 1
fi
echo "Testing installation of: $RPM"
# Dry run first
rpm -qpi "$RPM"
echo ""
rpm -qpl "$RPM"
# Actually install it
dnf install -y "$RPM"
# Verify installation
rpm -qa | grep continuwuity
# Check that the binary exists
[ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
[ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
- name: List built packages
run: |
echo "Binary RPMs:"
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec ls -la {} \;
echo ""
echo "Source RPMs:"
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec ls -la {} \;
- name: Collect artifacts
run: |
mkdir -p artifacts
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
cd artifacts
echo "Build Information:" > BUILD_INFO.txt
echo "==================" >> BUILD_INFO.txt
echo "Git commit: ${{ github.sha }}" >> BUILD_INFO.txt
echo "Git branch: ${{ github.ref_name }}" >> BUILD_INFO.txt
echo "Build date: $(date -u +%Y-%m-%d_%H:%M:%S_UTC)" >> BUILD_INFO.txt
echo "" >> BUILD_INFO.txt
echo "Package contents:" >> BUILD_INFO.txt
echo "-----------------" >> BUILD_INFO.txt
for rpm in *.rpm; do
echo "" >> BUILD_INFO.txt
echo "File: $rpm" >> BUILD_INFO.txt
rpm -qpi "$rpm" 2>/dev/null | grep -E "^(Name|Version|Release|Architecture|Size)" >> BUILD_INFO.txt
done
ls -la
- name: Upload binary RPM artifact
run: |
# Find the main binary RPM (exclude debug and source RPMs)
BIN_RPM=$(find artifacts -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" \
-type f)
mkdir -p upload-bin
cp $BIN_RPM upload-bin/
- name: Upload binary RPM
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity
path: upload-bin/
- name: Upload debug RPM artifact
uses: https://code.forgejo.org/actions/upload-artifact@v3
with:
name: continuwuity-debug
path: artifacts/*debuginfo*.rpm
- name: Publish to RPM Package Registry
if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
run: |
# Find the main binary RPM (exclude debug and source RPMs)
RPM=$(find artifacts -name "continuwuity-*.rpm" \
! -name "*debuginfo*" \
! -name "*debugsource*" \
! -name "*.src.rpm" \
-type f | head -1)
if [ -z "$RPM" ]; then
echo "No binary RPM found to publish"
exit 0
fi
RPM_BASENAME=$(basename "$RPM")
echo "Publishing: $RPM_BASENAME"
# Determine the group based on ref type and branch
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
GROUP="stable"
# For tags, extract the tag name for version info
TAG_NAME="${{ github.ref_name }}"
elif [ "${{ github.ref_name }}" = "main" ]; then
GROUP="dev"
else
# Use sanitized branch name as group for feature branches
GROUP=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)
fi
PACKAGE_INFO=$(rpm -qpi "$RPM" 2>/dev/null)
PACKAGE_NAME=$(echo "$PACKAGE_INFO" | grep "^Name" | awk '{print $3}')
PACKAGE_VERSION=$(echo "$PACKAGE_INFO" | grep "^Version" | awk '{print $3}')
PACKAGE_RELEASE=$(echo "$PACKAGE_INFO" | grep "^Release" | awk '{print $3}')
PACKAGE_ARCH=$(echo "$PACKAGE_INFO" | grep "^Architecture" | awk '{print $2}')
# Full version includes release
FULL_VERSION="${PACKAGE_VERSION}-${PACKAGE_RELEASE}"
# Forgejo's RPM registry cannot overwrite existing packages, so we must delete first
# 404 is OK if package doesn't exist yet
echo "Removing any existing package: $PACKAGE_NAME-$FULL_VERSION.$PACKAGE_ARCH"
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/package/$PACKAGE_NAME/$FULL_VERSION/$PACKAGE_ARCH")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete package (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$RPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/upload?sign=true"
echo ""
echo "✅ Published binary RPM to: https://forgejo.ellis.link/continuwuation/-/packages/rpm/continuwuity/"
echo "Group: $GROUP"
# Upload debug RPMs to separate group
DEBUG_RPMS=$(find artifacts -name "*debuginfo*.rpm")
if [ -n "$DEBUG_RPMS" ]; then
echo ""
echo "Publishing debug RPMs to group: ${GROUP}-debug"
for DEBUG_RPM in $DEBUG_RPMS; do
echo "Publishing: $(basename "$DEBUG_RPM")"
DEBUG_INFO=$(rpm -qpi "$DEBUG_RPM" 2>/dev/null)
DEBUG_NAME=$(echo "$DEBUG_INFO" | grep "^Name" | awk '{print $3}')
DEBUG_VERSION=$(echo "$DEBUG_INFO" | grep "^Version" | awk '{print $3}')
DEBUG_RELEASE=$(echo "$DEBUG_INFO" | grep "^Release" | awk '{print $3}')
DEBUG_ARCH=$(echo "$DEBUG_INFO" | grep "^Architecture" | awk '{print $2}')
DEBUG_FULL_VERSION="${DEBUG_VERSION}-${DEBUG_RELEASE}"
# Must delete existing package first (Forgejo limitation)
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/package/$DEBUG_NAME/$DEBUG_FULL_VERSION/$DEBUG_ARCH")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete debug package (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$DEBUG_RPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/upload?sign=true"
done
echo "✅ Published debug RPMs to group: ${GROUP}-debug"
fi
# Also upload the SRPM to separate group
SRPM=$(find artifacts -name "*.src.rpm" | head -1)
if [ -n "$SRPM" ]; then
echo ""
echo "Publishing source RPM: $(basename "$SRPM")"
echo "Publishing to group: ${GROUP}-src"
SRPM_INFO=$(rpm -qpi "$SRPM" 2>/dev/null)
SRPM_NAME=$(echo "$SRPM_INFO" | grep "^Name" | awk '{print $3}')
SRPM_VERSION=$(echo "$SRPM_INFO" | grep "^Version" | awk '{print $3}')
SRPM_RELEASE=$(echo "$SRPM_INFO" | grep "^Release" | awk '{print $3}')
SRPM_FULL_VERSION="${SRPM_VERSION}-${SRPM_RELEASE}"
# Must delete existing SRPM first (Forgejo limitation)
echo "Removing any existing SRPM: $SRPM_NAME-$SRPM_FULL_VERSION.src"
RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/package/$SRPM_NAME/$SRPM_FULL_VERSION/src")
HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
echo "ERROR: Failed to delete SRPM (HTTP $HTTP_CODE)"
echo "$RESPONSE" | head -n -1
exit 1
fi
curl --fail-with-body \
-X PUT \
-H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
-H "Content-Type: application/x-rpm" \
-T "$SRPM" \
"https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/upload?sign=true"
echo "✅ Published source RPM to group: ${GROUP}-src"
fi

View File

@@ -51,11 +51,11 @@ jobs:
- name: Detect runner environment
id: runner-env
uses: https://git.tomfos.tr/actions/detect-versions@v1
uses: ./.forgejo/actions/detect-runner-os
- name: Setup Node.js
if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: 22
@@ -63,7 +63,9 @@ jobs:
uses: actions/cache@v3
with:
path: ~/.npm
key: continuwuity-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}
key: ${{ steps.runner-env.outputs.slug }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ steps.runner-env.outputs.slug }}-node-
- name: Install dependencies
run: npm install --save-dev wrangler@latest

View File

@@ -24,7 +24,7 @@ jobs:
steps:
- name: 📦 Setup Node.js
uses: https://github.com/actions/setup-node@v6
uses: https://github.com/actions/setup-node@v5
with:
node-version: "22"

View File

@@ -11,13 +11,7 @@ on:
required: false
default: false
type: boolean
push:
branches:
- main
paths:
# Re-run when config changes
- '.forgejo/regsync/regsync.yml'
- '.forgejo/workflows/mirror-images.yml'
concurrency:
group: "mirror-images"
cancel-in-progress: true
@@ -30,27 +24,12 @@ jobs:
BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }}
GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }}
GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
N7574_GIT_USERNAME: ${{ vars.N7574_GIT_USERNAME }}
N7574_GIT_TOKEN: ${{ secrets.N7574_GIT_TOKEN }}
GH_PACKAGES_USER: ${{ vars.GH_PACKAGES_USER }}
GH_PACKAGES_TOKEN: ${{ secrets.GH_PACKAGES_TOKEN }}
DOCKER_MIRROR_USER: ${{ vars.DOCKER_MIRROR_USER }}
DOCKER_MIRROR_TOKEN: ${{ secrets.DOCKER_MIRROR_TOKEN }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
persist-credentials: false
# - uses: https://github.com/actions/create-github-app-token@v2
# id: app-token
# with:
# app-id: ${{ vars.GH_APP_ID }}
# private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
# github-api-url: https://api.github.com
# owner: continuwuity
# repositories: continuwuity
- name: Install regctl
uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main
with:

View File

@@ -3,6 +3,15 @@ concurrency:
group: "release-image-${{ github.ref }}"
on:
pull_request:
paths-ignore:
- "*.md"
- "**/*.md"
- ".gitlab-ci.yml"
- ".gitignore"
- "renovate.json"
- "pkg/**"
- "docs/**"
push:
branches:
- main
@@ -14,8 +23,6 @@ on:
- "renovate.json"
- "pkg/**"
- "docs/**"
tags:
- "v*.*.*"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:

View File

@@ -43,7 +43,7 @@ jobs:
name: Renovate
runs-on: ubuntu-latest
container:
image: ghcr.io/renovatebot/renovate:42.11.0@sha256:656c1e5b808279eac16c37b89562fb4c699e02fc7e219244f4a1fc2f0a7ce367
image: ghcr.io/renovatebot/renovate:41.115.6@sha256:70c89592d424a54bedf7538c5bea2e43f4d66ce2c8b74d1356d4cf0ee9ed7ec0
options: --tmpfs /tmp:exec
steps:
- name: Checkout
@@ -59,27 +59,27 @@ jobs:
with:
path: |
/tmp/renovate/cache/renovate/repository
key: renovate-repo-cache-${{ github.run_id }}
key: repo-cache-${{ github.run_id }}
restore-keys: |
renovate-repo-cache-
repo-cache-
- name: Restore renovate package cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: renovate-package-cache-${{ github.run_id }}
key: package-cache-${{ github.run_id }}
restore-keys: |
renovate-package-cache-
package-cache-
- name: Restore renovate OSV cache
uses: actions/cache/restore@v4
with:
path: |
/tmp/osv
key: renovate-osv-cache-${{ github.run_id }}
key: osv-cache-${{ github.run_id }}
restore-keys: |
renovate-osv-cache-
osv-cache-
- name: Self-hosted Renovate
run: renovate
@@ -113,7 +113,7 @@ jobs:
with:
path: |
/tmp/renovate/cache/renovate/repository
key: renovate-repo-cache-${{ github.run_id }}
key: repo-cache-${{ github.run_id }}
- name: Save renovate package cache
if: always()
@@ -121,7 +121,7 @@ jobs:
with:
path: |
/tmp/renovate/cache/renovate/renovate-cache-sqlite
key: renovate-package-cache-${{ github.run_id }}
key: package-cache-${{ github.run_id }}
- name: Save renovate OSV cache
if: always()
@@ -129,4 +129,4 @@ jobs:
with:
path: |
/tmp/osv
key: renovate-osv-cache-${{ github.run_id }}
key: osv-cache-${{ github.run_id }}

View File

@@ -1,121 +0,0 @@
name: Update flake hashes
on:
workflow_dispatch:
pull_request:
paths:
- "Cargo.lock"
- "Cargo.toml"
- "rust-toolchain.toml"
- "nix/**/*"
- ".forgejo/workflows/update-flake-hashes.yml"
jobs:
update-flake-hashes:
runs-on: ubuntu-latest
steps:
- uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
with:
fetch-depth: 0
fetch-tags: false
fetch-single-branch: true
submodules: false
persist-credentials: true
token: ${{ secrets.FORGEJO_TOKEN }}
- uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
with:
nix_path: nixpkgs=channel:nixos-unstable
# We can skip getting a toolchain hash if this was ran as a dispatch with the intent
# to update just the rocksdb hash. If this was ran as a dispatch and the toolchain
# files are changed, we still update them, as well as the rocksdb import.
- name: Detect changed files
id: changes
run: |
git fetch origin ${{ github.base_ref }} --depth=1 || true
if [ -n "${{ github.event.pull_request.base.sha }}" ]; then
base=${{ github.event.pull_request.base.sha }}
else
base=$(git rev-parse HEAD~1)
fi
echo "Base: $base"
echo "HEAD: $(git rev-parse HEAD)"
git diff --name-only $base HEAD > changed_files.txt
echo "detected changes in $(cat changed_files.txt)"
# Join files with commas
files=$(paste -sd, changed_files.txt)
echo "files=$files" >> $FORGEJO_OUTPUT
- name: Debug output
run: |
echo "State of output"
echo "Changed files: ${{ steps.changes.outputs.files }}"
- name: Get new toolchain hash
if: contains(steps.changes.outputs.files, 'Cargo.toml') || contains(steps.changes.outputs.files, 'Cargo.lock') || contains(steps.changes.outputs.files, 'rust-toolchain.toml')
run: |
# Set the current sha256 to an empty hash to make `nix build` calculate a new one
awk '/fromToolchainFile *\{/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rust.nix > temp.nix
mv temp.nix nix/packages/rust.nix
# Build continuwuity and filter for the new hash
# We do `|| true` because we want this to fail without stopping the workflow
nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_toolchain_hash.txt) || true
# Place the new hash in place of the empty hash
new_hash=$(cat new_toolchain_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rust.nix
echo "New hash:"
awk -F'"' '/fromToolchainFile/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rust.nix
echo "Expected new hash:"
cat new_toolchain_hash.txt
rm new_toolchain_hash.txt
- name: Get new rocksdb hash
if: contains(steps.changes.outputs.files, '.nix') || contains(steps.changes.outputs.files, 'flake.lock')
run: |
# Set the current sha256 to an empty hash to make `nix build` calculate a new one
awk '/repo = "rocksdb";/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = lib.fakeSha256;"); found=0} 1' nix/packages/rocksdb/package.nix > temp.nix
mv temp.nix nix/packages/rocksdb/package.nix
# Build continuwuity and filter for the new hash
# We do `|| true` because we want this to fail without stopping the workflow
nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_rocksdb_hash.txt) || true
# Place the new hash in place of the empty hash
new_hash=$(cat new_rocksdb_hash.txt)
sed -i "s|lib.fakeSha256|\"$new_hash\"|" nix/packages/rocksdb/package.nix
echo "New hash:"
awk -F'"' '/repo = "rocksdb";/{found=1; next} found && /sha256 =/{print $2; found=0}' nix/packages/rocksdb/package.nix
echo "Expected new hash:"
cat new_rocksdb_hash.txt
rm new_rocksdb_hash.txt
- name: Show diff
run: git diff flake.nix nix
- name: Push changes
run: |
set -euo pipefail
if git diff --quiet --exit-code; then
echo "No changes to commit."
exit 0
fi
git config user.email "renovate@mail.ellis.link"
git config user.name "renovate"
REF="${{ github.head_ref }}"
git fetch origin "$REF"
git checkout "$REF"
git commit -a -m "chore(Nix): Updated flake hashes"
git push origin HEAD:refs/heads/"$REF"
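
For context, the two hash-update steps above rely on Nix's fixed-output derivation check: building with `lib.fakeSha256` in place fails, and the error message carries the real hash that `grep 'got:'` scrapes. The failing output looks roughly like this (illustrative; exact wording varies by Nix version):

```yaml
# error: hash mismatch in fixed-output derivation '/nix/store/...-source':
#          specified: sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
#             got:    sha256-<real hash captured by the workflow>
```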

View File

@@ -7,7 +7,7 @@ default_stages:
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v6.0.0
rev: v5.0.0
hooks:
- id: fix-byte-order-marker
- id: check-case-conflict
@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.39.2
rev: v1.26.0
hooks:
- id: typos
- id: typos

Cargo.lock (generated, 1977 lines changed)

File diff suppressed because it is too large.

View File

@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
version = "0.5.0-rc.8.1"
version = "0.5.0-rc.7"
[workspace.metadata.crane]
name = "conduwuit"
@@ -48,7 +48,7 @@ features = ["ffi", "std", "union"]
version = "0.7.0"
[workspace.dependencies.ctor]
version = "0.6.0"
version = "0.5.0"
[workspace.dependencies.cargo_toml]
version = "0.22"
@@ -166,8 +166,8 @@ default-features = false
features = ["raw_value"]
# Used for appservice registration files
[workspace.dependencies.serde-saphyr]
version = "0.0.8"
[workspace.dependencies.serde_yml]
version = "0.0.12"
# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -210,13 +210,13 @@ default-features = false
version = "0.1.41"
default-features = false
[workspace.dependencies.tracing-subscriber]
version = "0.3.20"
version = "0.3.19"
default-features = false
features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"]
[workspace.dependencies.tracing-journald]
version = "0.3.1"
[workspace.dependencies.tracing-core]
version = "0.1.34"
version = "0.1.33"
default-features = false
# for URL previews
@@ -286,7 +286,7 @@ features = [
]
[workspace.dependencies.hyper-util]
version = "=0.1.17"
version = "0.1.11"
default-features = false
features = [
"server-auto",
@@ -351,7 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
rev = "50b2a91b2ab8f9830eea80b9911e11234e0eac66"
rev = "d18823471ab3c09e77ff03eea346d4c07e572654"
features = [
"compat",
"rand",
@@ -381,7 +381,6 @@ features = [
"unstable-msc4095",
"unstable-msc4121",
"unstable-msc4125",
"unstable-msc4155",
"unstable-msc4186",
"unstable-msc4203", # sending to-device events to appservices
"unstable-msc4210", # remove legacy mentions
@@ -412,27 +411,28 @@ default-features = false
# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
version = "0.31.0"
version = "0.30.0"
[workspace.dependencies.tracing-flame]
version = "0.2.0"
[workspace.dependencies.tracing-opentelemetry]
version = "0.32.0"
version = "0.31.0"
[workspace.dependencies.opentelemetry_sdk]
version = "0.31.0"
version = "0.30.0"
features = ["rt-tokio"]
[workspace.dependencies.opentelemetry-otlp]
version = "0.31.0"
version = "0.30.0"
features = ["http", "trace", "logs", "metrics"]
[workspace.dependencies.opentelemetry-jaeger-propagator]
version = "0.30.0"
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.45.0"
version = "0.42.0"
default-features = false
features = [
"backtrace",
@@ -448,9 +448,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.45.0"
version = "0.42.0"
[workspace.dependencies.sentry-tower]
version = "0.45.0"
version = "0.42.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -476,7 +476,7 @@ default-features = false
features = ["use_std"]
[workspace.dependencies.console-subscriber]
version = "0.5"
version = "0.4"
[workspace.dependencies.nix]
version = "0.30.1"
@@ -550,9 +550,9 @@ features = ["std"]
version = "1.0.2"
[workspace.dependencies.ldap3]
version = "0.12.0"
version = "0.11.5"
default-features = false
features = ["sync", "tls-rustls", "rustls-provider"]
features = ["sync", "tls-rustls"]
#
# Patches
@@ -560,7 +560,19 @@ features = ["sync", "tls-rustls", "rustls-provider"]
# backport of [https://github.com/tokio-rs/tracing/pull/2956] to the 0.1.x branch of tracing.
# we can switch back to upstream if #2956 is merged and backported in the upstream repo.
# https://forgejo.ellis.link/continuwuation/tracing/commit/b348dca742af641c47bc390261f60711c2af573c
[patch.crates-io.tracing-subscriber]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-core]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
[patch.crates-io.tracing-log]
git = "https://forgejo.ellis.link/continuwuation/tracing"
rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa"
# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch
# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch
@@ -584,7 +596,13 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da"
# reverts hyperium#148 conflicting with our delicate federation resolver hooks
[patch.crates-io.hyper-util]
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
rev = "5886d5292bf704c246206ad72d010d674a7b77d0"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"
# Allows no-aaaa option in resolv.conf
# Use 1-indexed line numbers when displaying parse error messages
[patch.crates-io.resolv-conf]
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
rev = "ebbbec1cb965b487a0150f5d007e96c05e3d72af"
#
# Our crates
@@ -944,7 +962,7 @@ semicolon_outside_block = "warn"
str_to_string = "warn"
string_lit_chars_any = "warn"
string_slice = "warn"
string_to_string = "warn"
suspicious_xor_used_as_pow = "warn"
tests_outside_test_module = "warn"
try_err = "warn"

View File

@@ -11,7 +11,7 @@ ## A community-driven [Matrix](https://matrix.org/) homeserver in Rust
<!-- ANCHOR_END: catchphrase -->
[continuwuity] is a Matrix homeserver written in Rust.
It's the official community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
It's a community continuation of the [conduwuit](https://github.com/girlbossceo/conduwuit) homeserver.
<!-- ANCHOR: body -->

View File

@@ -4,6 +4,7 @@ description = "continuwuity is a community continuation of the conduwuit Matrix
language = "en"
authors = ["The continuwuity Community"]
text-direction = "ltr"
multilingual = false
src = "docs"
[build]
@@ -17,7 +18,7 @@ edition = "2024"
[output.html]
edit-url-template = "https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/{path}"
git-repository-url = "https://forgejo.ellis.link/continuwuation/continuwuity"
git-repository-icon = "fab-git-alt"
git-repository-icon = "fa-git-alt"
[output.html.search]
limit-results = 15

View File

@@ -957,21 +957,6 @@
#
#rocksdb_bottommost_compression = true
# Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
#
# At present, only ZSTD compression is supported by RocksDB for WAL
# compression. Enabling this can reduce WAL size at the expense of some
# CPU usage during writes.
#
# The options are:
# - "none" = No compression
# - "zstd" = ZSTD compression
#
# For more information on WAL compression, see:
# https://github.com/facebook/rocksdb/wiki/WAL-Compression
#
#rocksdb_wal_compression = "zstd"
# Database recovery mode (for RocksDB WAL corruption).
#
# Use this option when the server reports corruption and refuses to start.
@@ -1512,19 +1497,6 @@
#
#block_non_admin_invites = false
# Enable or disable making requests to MSC4284 Policy Servers.
# It is recommended you keep this enabled unless you experience frequent
# connectivity issues, such as in a restricted networking environment.
#
#enable_msc4284_policy_servers = true
# Enable running locally generated events through configured MSC4284
# policy servers. You may wish to disable this if your server is
# single-user for a slight speed benefit in some rooms, but otherwise
# should leave it enabled.
#
#policy_server_check_own_events = true
# Allow admins to enter commands in rooms other than "#admins" (admin
# room) by prefixing your message with "\!admin" or "\\!admin" followed up
# a normal continuwuity admin command. The reply will be publicly visible

View File

@@ -48,7 +48,7 @@ EOF
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.0
ENV BINSTALL_VERSION=1.15.4
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
@@ -166,7 +166,7 @@ ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace

View File

@@ -18,7 +18,7 @@ RUN --mount=type=cache,target=/etc/apk/cache apk add \
# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.16.0
ENV BINSTALL_VERSION=1.15.4
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
@@ -122,7 +122,7 @@ ARG RUST_PROFILE=release
# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace

View File

@@ -10,7 +10,6 @@ # Summary
- [Kubernetes](deploying/kubernetes.md)
- [Arch Linux](deploying/arch-linux.md)
- [Debian](deploying/debian.md)
- [Fedora](deploying/fedora.md)
- [FreeBSD](deploying/freebsd.md)
- [TURN](turn.md)
- [Appservices](appservices.md)

View File

@@ -1078,10 +1078,7 @@ ###### **Subcommands:**
* `delete` — - Deletes a single media file from our database and on the filesystem via a single MXC URL or event ID (not redacted)
* `delete-list` — - Deletes a codeblock list of MXC URLs from our database and on the filesystem. This will always ignore errors
* `delete-past-remote-media` — Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* `delete-past-remote-media` - Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
* `delete-all-from-user` — - Deletes all the local media from a local user on our server. This will always ignore errors by default
* `delete-all-from-server` — - Deletes all remote media from the specified remote server. This will always ignore errors by default
* `get-file-info`
@@ -1113,25 +1110,13 @@ ## `admin media delete-list`
## `admin media delete-past-remote-media`
Deletes all remote (and optionally local) media created before/after
[duration] ago, using filesystem metadata first created at date, or
fallback to last modified date. This will always ignore errors by
default.
* Examples:
* Delete all remote media older than a year:
`!admin media delete-past-remote-media -b 1y`
* Delete all remote and local media from 3 days ago, up until now:
`!admin media delete-past-remote-media -a 3d --yes-i-want-to-delete-local-media`
- Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default
**Usage:** `admin media delete-past-remote-media [OPTIONS] <DURATION>`
###### **Arguments:**
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) from now within which to search
* `<DURATION>` — - The relative time (e.g. 30s, 5m, 7d) within which to search
###### **Options:**

View File

@@ -17,5 +17,3 @@ ## systemd unit file
```
{{#include ../../pkg/conduwuit.service}}
```
</details>

View File

@@ -1,201 +0,0 @@
# RPM Installation Guide
Continuwuity is available as RPM packages for Fedora, RHEL, and compatible distributions.
The RPM packaging files are maintained in the `fedora/` directory:
- `continuwuity.spec.rpkg` - RPM spec file using rpkg macros for building from git
- `continuwuity.service` - Systemd service file for the server
- `RPM-GPG-KEY-continuwuity.asc` - GPG public key for verifying signed packages
RPM packages built by CI are signed with our GPG key (Ed25519, ID: `5E0FF73F411AAFCA`).
```bash
# Import the signing key
sudo rpm --import https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc
# Verify a downloaded package
rpm --checksig continuwuity-*.rpm
```
## Installation methods
**Stable releases** (recommended)
```bash
# Add the repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuation.repo
sudo dnf install continuwuity
```
**Development builds** from main branch
```bash
# Add the dev repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuation.repo
sudo dnf install continuwuity
```
**Feature branch builds** (example: `tom/new-feature`)
```bash
# Branch names are sanitized (slashes become hyphens, lowercase only)
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/tom-new-feature/continuwuation.repo
sudo dnf install continuwuity
```
**Direct installation** without adding repository
```bash
# Latest stable release
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuity
# Latest development build
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuity
# Specific feature branch
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/branch-name/continuwuity
```
**Manual repository configuration** (alternative method)
```bash
cat << 'EOF' | sudo tee /etc/yum.repos.d/continuwuity.repo
[continuwuity]
name=Continuwuity - Matrix homeserver
baseurl=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable
enabled=1
gpgcheck=1
gpgkey=https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc
EOF
sudo dnf install continuwuity
```
## Package management
**Automatic updates** with DNF Automatic
```bash
# Install and configure
sudo dnf install dnf-automatic
sudo nano /etc/dnf/automatic.conf # Set: apply_updates = yes
sudo systemctl enable --now dnf-automatic.timer
```
**Manual updates**
```bash
# Check for updates
sudo dnf check-update continuwuity
# Update to latest version
sudo dnf update continuwuity
```
**Switching channels** (stable/dev/feature branches)
```bash
# List enabled repositories
dnf repolist | grep continuwuation
# Disable current repository
sudo dnf config-manager --set-disabled continuwuation-stable # or -dev, or branch name
# Enable desired repository
sudo dnf config-manager --set-enabled continuwuation-dev # or -stable, or branch name
# Update to the new channel's version
sudo dnf update continuwuity
```
**Verifying installation**
```bash
# Check installed version
rpm -q continuwuity
# View package information
rpm -qi continuwuity
# List installed files
rpm -ql continuwuity
# Verify package integrity
rpm -V continuwuity
```
## Service management and removal
**Systemd service commands**
```bash
# Start the service
sudo systemctl start conduwuit
# Enable on boot
sudo systemctl enable conduwuit
# Check status
sudo systemctl status conduwuit
# View logs
sudo journalctl -u conduwuit -f
```
**Uninstallation**
```bash
# Stop and disable the service
sudo systemctl stop conduwuit
sudo systemctl disable conduwuit
# Remove the package
sudo dnf remove continuwuity
# Remove the repository (optional)
sudo rm /etc/yum.repos.d/continuwuation-*.repo
```
## Troubleshooting
**GPG key errors**: Temporarily disable GPG checking
```bash
sudo dnf --nogpgcheck install continuwuity
```
**Repository metadata issues**: Clear and rebuild cache
```bash
sudo dnf clean all
sudo dnf makecache
```
**Finding specific versions**
```bash
# List all available versions
dnf --showduplicates list continuwuity
# Install a specific version
sudo dnf install continuwuity-<version>
```
## Building locally
Build the RPM locally using rpkg:
```bash
# Install dependencies
sudo dnf install rpkg rpm-build cargo-rpm-macros systemd-rpm-macros
# Clone the repository
git clone https://forgejo.ellis.link/continuwuation/continuwuity.git
cd continuwuity
# Build SRPM
rpkg srpm
# Build RPM
rpmbuild --rebuild *.src.rpm
```
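If the build succeeds, `rpmbuild` typically places the binary package under `~/rpmbuild/RPMS/<arch>/` (the exact location depends on your `%_topdir` configuration). A minimal sketch for installing and sanity-checking the result, assuming the default output path:

```bash
# Install the locally built package (path assumes rpmbuild defaults;
# adjust to wherever your RPM actually landed)
sudo dnf install ~/rpmbuild/RPMS/x86_64/continuwuity-*.rpm

# Confirm the package and service are in place
rpm -q continuwuity
systemctl status conduwuit
```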

View File

@@ -241,7 +241,7 @@ ## Documentation
### Code Comments
- Reference related documentation or parts of the specification
- When a task has multiple ways of being achieved, explain your reasoning for your decision
- When a task has multiple ways of being acheved, explain your reasoning for your decision
- Update comments when code changes
```rs

View File

@@ -8,10 +8,6 @@
{
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
},
{
"id": 5,
"message": "It's a bird! It's a plane! No, it's 0.5.0-rc.8.1!\n\nThis is a minor bugfix update to the rc8 which backports some important fixes from the latest main branch. If you still haven't updated to rc8, you should skip to main. Otherwise, you should upgrade to this bugfix release as soon as possible.\n\nBugfixes backported to this version:\n\n- Resolved several issues with state resolution v2.1 (room version 12)\n- Fixed issues with the `restricted` and `knock_restricted` join rules that would sometimes incorrectly disallow a valid join\n- Fixed the automatic support contact listing being a no-op\n- Fixed upgrading pre-v12 rooms to v12 rooms\n- Fixed policy servers sending the incorrect JSON objects (resulted in false positives)\n- Fixed debug build panic during MSC4133 migration\n\nIt is recommended, if you can and are comfortable with doing so, following updates to the main branch - we're in the run up to the full 0.5.0 release, and more and more bugfixes and new features are being pushed constantly. Please don't forget to join [#announcements:continuwuity.org](https://matrix.to/#/#announcements:continuwuity.org) to receive this news faster and be alerted to other important updates!"
}
]
}

flake.lock generated (451 lines changed)
View File

@@ -1,28 +1,94 @@
{
"nodes": {
"advisory-db": {
"flake": false,
"attic": {
"inputs": {
"crane": "crane",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nix-github-actions": "nix-github-actions",
"nixpkgs": "nixpkgs",
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1761112158,
"narHash": "sha256-RIXu/7eyKpQHjsPuAUODO81I4ni8f+WYSb7K4mTG6+0=",
"owner": "rustsec",
"repo": "advisory-db",
"rev": "58f3aaec0e1776f4a900737be8cd7cb00972210d",
"lastModified": 1756403898,
"narHash": "sha256-S4SJDmVTtbcXaJkYrMFkcA5SDrpfRHlBbzwp6IRRPAw=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "2524dd1c007bc7a0a9e9c863a1b02de8d54b319b",
"type": "github"
},
"original": {
"owner": "rustsec",
"repo": "advisory-db",
"owner": "zhaofengli",
"ref": "main",
"repo": "attic",
"type": "github"
}
},
"cachix": {
"inputs": {
"devenv": "devenv",
"flake-compat": "flake-compat_2",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1756385612,
"narHash": "sha256-+NU5MMhuPHHRyvZZWNFG7zt+leRSPsJu1MwhOUzkPUk=",
"owner": "cachix",
"repo": "cachix",
"rev": "dc24688cd67518c3711d511fa369c0f5a131063a",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "master",
"repo": "cachix",
"type": "github"
}
},
"cachix_2": {
"inputs": {
"devenv": [
"cachix",
"devenv"
],
"flake-compat": [
"cachix",
"devenv"
],
"git-hooks": [
"cachix",
"devenv",
"git-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "latest",
"repo": "cachix",
"type": "github"
}
},
"crane": {
"locked": {
"lastModified": 1760924934,
"narHash": "sha256-tuuqY5aU7cUkR71sO2TraVKK2boYrdW3gCSXUkF4i44=",
"lastModified": 1751562746,
"narHash": "sha256-smpugNIkmDeicNz301Ll1bD7nFOty97T79m4GUMUczA=",
"owner": "ipetkov",
"repo": "crane",
"rev": "c6b4d5308293d0d04fcfeee92705017537cad02f",
"rev": "aed2020fd3dc26e1e857d4107a5a67a33ab6c1fd",
"type": "github"
},
"original": {
@@ -31,6 +97,53 @@
"type": "github"
}
},
"crane_2": {
"locked": {
"lastModified": 1757183466,
"narHash": "sha256-kTdCCMuRE+/HNHES5JYsbRHmgtr+l9mOtf5dpcMppVc=",
"owner": "ipetkov",
"repo": "crane",
"rev": "d599ae4847e7f87603e7082d73ca673aa93c916d",
"type": "github"
},
"original": {
"owner": "ipetkov",
"ref": "master",
"repo": "crane",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix_2",
"flake-compat": [
"cachix",
"flake-compat"
],
"git-hooks": [
"cachix",
"git-hooks"
],
"nix": "nix",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1754404745,
"narHash": "sha256-BdbW/iTImczgcuATgQIa9sPGuYIBxVq2xqcvICsa2AQ=",
"owner": "cachix",
"repo": "devenv",
"rev": "6563b21105168f90394dfaf58284b078af2d7275",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"fenix": {
"inputs": {
"nixpkgs": [
@@ -39,20 +152,53 @@
"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1761115517,
"narHash": "sha256-Fev/ag/c3Fp3JBwHfup3lpA5FlNXfkoshnQ7dssBgJ0=",
"lastModified": 1757400094,
"narHash": "sha256-5Rcs6juMoMTaMJSR1glravl4QB9yLAFBD8s7KLi4kdQ=",
"owner": "nix-community",
"repo": "fenix",
"rev": "320433651636186ea32b387cff05d6bbfa30cea7",
"rev": "0682b9b518792c9428865c511a4c40c9ad85c243",
"type": "github"
},
"original": {
"owner": "nix-community",
"ref": "main",
"repo": "fenix",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_3": {
"flake": false,
"locked": {
"lastModified": 1747046372,
@@ -71,14 +217,17 @@
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
"nixpkgs-lib": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1760948891,
"narHash": "sha256-TmWcdiUUaWk8J4lpjzu4gCGxWY6/Ok7mOK4fIFfBuU4=",
"lastModified": 1751413152,
"narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "864599284fc7c0ba6357ed89ed5e2cd5040f0c04",
"rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
"type": "github"
},
"original": {
@@ -87,13 +236,214 @@
"type": "github"
}
},
"flake-parts_2": {
"inputs": {
"nixpkgs-lib": [
"cachix",
"devenv",
"nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "flake-utils",
"type": "github"
}
},
"git-hooks": {
"inputs": {
"flake-compat": [
"cachix",
"flake-compat"
],
"gitignore": "gitignore",
"nixpkgs": [
"cachix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1750779888,
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "git-hooks.nix",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"cachix",
"git-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
"cachix",
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts_2",
"git-hooks-nix": [
"cachix",
"devenv",
"git-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-23-11": [
"cachix",
"devenv"
],
"nixpkgs-regression": [
"cachix",
"devenv"
]
},
"locked": {
"lastModified": 1752773918,
"narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=",
"owner": "cachix",
"repo": "nix",
"rev": "031c3cf42d2e9391eee373507d8c12e0f9606779",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "devenv-2.30",
"repo": "nix",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1731533336,
"narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
"type": "github"
},
"original": {
"owner": "numtide",
"ref": "main",
"repo": "nix-filter",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1737420293,
"narHash": "sha256-F1G5ifvqTpJq7fdkT34e/Jy9VCyzd5XfJ9TO8fHhJWE=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "f4158fa080ef4503c8f4c820967d946c2af31ec9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1760878510,
"narHash": "sha256-K5Osef2qexezUfs0alLvZ7nQFTGS9DL2oTVsIXsqLgs=",
"lastModified": 1751949589,
"narHash": "sha256-mgFxAPLWw0Kq+C8P3dRrZrOYEQXOtKuYVlo9xvPntt8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5e2a59a5b1a82f89f2c7e598302a9cacebb72a67",
"rev": "9b008d60392981ad674e04016d25619281550a9d",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1751741127,
"narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "29e290002bfff26af1db6f64d070698019460302",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1754214453,
"narHash": "sha256-Q/I2xJn/j1wpkGhWkQnm20nShYnG7TI99foDBpXm1SY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "5b09dc45f24cf32316283e62aec81ffee3c3e376",
"type": "github"
},
"original": {
@@ -103,40 +453,42 @@
"type": "github"
}
},
"nixpkgs-lib": {
"nixpkgs_3": {
"locked": {
"lastModified": 1754788789,
"narHash": "sha256-x2rJ+Ovzq0sCMpgfgGaaqgBSwY+LST+WbZ6TytnT9Rk=",
"owner": "nix-community",
"repo": "nixpkgs.lib",
"rev": "a73b9c743612e4244d865a2fdee11865283c04e6",
"lastModified": 1757034884,
"narHash": "sha256-PgLSZDBEWUHpfTRfFyklmiiLBE1i1aGCtz4eRA3POao=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "ca77296380960cd497a765102eeb1356eb80fed0",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nixpkgs.lib",
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"advisory-db": "advisory-db",
"crane": "crane",
"attic": "attic",
"cachix": "cachix",
"crane": "crane_2",
"fenix": "fenix",
"flake-compat": "flake-compat",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs",
"treefmt-nix": "treefmt-nix"
"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_3"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1761077270,
"narHash": "sha256-O1uTuvI/rUlubJ8AXKyzh1WSWV3qCZX0huTFUvWLN4E=",
"lastModified": 1757362324,
"narHash": "sha256-/PAhxheUq4WBrW5i/JHzcCqK5fGWwLKdH6/Lu1tyS18=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "39990a923c8bca38f5bd29dc4c96e20ee7808d5d",
"rev": "9edc9cbe5d8e832b5864e09854fa94861697d2fd",
"type": "github"
},
"original": {
@@ -146,23 +498,18 @@
"type": "github"
}
},
"treefmt-nix": {
"inputs": {
"nixpkgs": [
"nixpkgs"
]
},
"systems": {
"locked": {
"lastModified": 1760945191,
"narHash": "sha256-ZRVs8UqikBa4Ki3X4KCnMBtBW0ux1DaT35tgsnB1jM4=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "f56b1934f5f8fcab8deb5d38d42fd692632b47c2",
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "treefmt-nix",
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}

flake.nix (371 lines changed)
View File

@@ -1,46 +1,351 @@
{
description = "A nix flake for the continuwuity project";
inputs = {
# basics
flake-parts.url = "github:hercules-ci/flake-parts";
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
# for rust via nix
crane.url = "github:ipetkov/crane";
attic.url = "github:zhaofengli/attic?ref=main";
cachix.url = "github:cachix/cachix?ref=master";
crane = {
url = "github:ipetkov/crane?ref=master";
};
fenix = {
url = "github:nix-community/fenix";
url = "github:nix-community/fenix?ref=main";
inputs.nixpkgs.follows = "nixpkgs";
};
# for vuln checks
advisory-db = {
url = "github:rustsec/advisory-db";
flake = false;
};
treefmt-nix = {
url = "github:numtide/treefmt-nix";
inputs.nixpkgs.follows = "nixpkgs";
};
# for default.nix
flake-compat = {
url = "github:edolstra/flake-compat?ref=master";
flake = false;
};
flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
};
outputs =
inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [ ./nix ];
systems = [
# good support
"x86_64-linux"
# support untested but theoretically there
"aarch64-linux"
];
};
inputs:
inputs.flake-utils.lib.eachDefaultSystem (
system:
let
pkgsHost = import inputs.nixpkgs {
inherit system;
};
fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];
mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./pkg/nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [
"out"
"dev"
"man"
];
buildFlags = [ "library" ];
};
rocksdb =
(pkgs.rocksdb_9_10.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit (self) liburing;
}).overrideAttrs
(old: {
src = pkgsHost.fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.4.fb";
sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
};
version = "v10.4.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
"-DWITH_SNAPPY=1"
"-DZLIB=1"
"-DWITH_ZLIB=1"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=1"
# We don't need to build rocksdb tests
"-DWITH_TESTS=1"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=1"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"-DFORCE_SSE42=1"
# PORTABLE will get set in main/default.nix
"-DPORTABLE=1"
] old.cmakeFlags
++ [
# No real reason to have snappy, no one uses this
"-DWITH_SNAPPY=0"
"-DZLIB=0"
"-DWITH_ZLIB=0"
# We don't need to use ldb or sst_dump (core_tools)
"-DWITH_CORE_TOOLS=0"
# We don't need trace tools
"-DWITH_TRACE_TOOLS=0"
# We don't need to build rocksdb tests
"-DWITH_TESTS=0"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"-DUSE_RTTI=0"
];
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting this so we don't have to revert it and make this nix exclusive
patches = [ ];
postPatch = ''
# Fix gcc-13 build failures due to missing <cstdint> and
# <system_error> includes, fixed upstream since 8.x
sed -e '1i #include <cstdint>' -i db/compaction/compaction_iteration_stats.h
sed -e '1i #include <cstdint>' -i table/block_based/data_block_hash_index.h
sed -e '1i #include <cstdint>' -i util/string_util.h
sed -e '1i #include <cstdint>' -i include/rocksdb/utilities/checkpoint.h
'';
});
});
scopeHost = mkScope pkgsHost;
mkCrossScope =
crossSystem:
let
pkgsCrossStatic =
(import inputs.nixpkgs {
inherit system;
crossSystem = {
config = crossSystem;
};
}).pkgsStatic;
in
mkScope pkgsCrossStatic;
in
{
packages =
{
default = scopeHost.main.override {
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
default-debug = scopeHost.main.override {
profile = "dev";
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
# Just a test profile used for things like CI and complement
default-test = scopeHost.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features = scopeHost.main.override {
all_features = true;
disable_features = [
# Don't include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
all-features-debug = scopeHost.main.override {
profile = "dev";
all_features = true;
# Debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# Don't include experimental features
"experimental"
# This is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
hmalloc = scopeHost.main.override { features = [ "hardened_malloc" ]; };
}
// builtins.listToAttrs (
builtins.concatLists (
builtins.map
(
crossSystem:
let
binaryName = "static-${crossSystem}";
scopeCrossStatic = mkCrossScope crossSystem;
in
[
# An output for a statically-linked binary
{
name = binaryName;
value = scopeCrossStatic.main;
}
# An output for a statically-linked binary with x86_64 haswell
# target optimisations
{
name = "${binaryName}-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary
{
name = "${binaryName}-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
# debug build users expect full logs
disable_release_max_log_level = true;
};
}
# An output for a statically-linked unstripped debug binary with the
# "test" profile (for CI usage only)
{
name = "${binaryName}-test";
value = scopeCrossStatic.main.override {
profile = "test";
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features`
{
name = "${binaryName}-all-features";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with `--all-features` and with x86_64 haswell
# target optimisations
{
name = "${binaryName}-all-features-x86_64-haswell-optimised";
value = scopeCrossStatic.main.override {
all_features = true;
disable_features = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
x86_64_haswell_target_optimised =
if (crossSystem == "x86_64-linux-gnu" || crossSystem == "x86_64-linux-musl") then true else false;
};
}
# An output for a statically-linked unstripped debug ("dev") binary with `--all-features`
{
name = "${binaryName}-all-features-debug";
value = scopeCrossStatic.main.override {
profile = "dev";
all_features = true;
# debug build users expect full logs
disable_release_max_log_level = true;
disable_features = [
# dont include experimental features
"experimental"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
];
};
}
# An output for a statically-linked binary with hardened_malloc
{
name = "${binaryName}-hmalloc";
value = scopeCrossStatic.main.override {
features = [ "hardened_malloc" ];
};
}
]
)
[
#"x86_64-apple-darwin"
#"aarch64-apple-darwin"
"x86_64-linux-gnu"
"x86_64-linux-musl"
"aarch64-linux-musl"
]
)
);
}
);
}

View File

@@ -1,108 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
commonAttrs = (uwulib.build.commonAttrs { }) // {
buildInputs = [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
nativeBuildInputs = [
pkgs.pkg-config
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
env = {
LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
LD_LIBRARY_PATH = lib.makeLibraryPath [
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
}
// uwulib.environment.buildPackageEnv
// {
ROCKSDB_INCLUDE_DIR = "${rocksdbAllFeatures}/include";
ROCKSDB_LIB_DIR = "${rocksdbAllFeatures}/lib";
};
};
cargoArtifacts = self'.packages.continuwuity-all-features-deps;
in
{
# taken from
#
# https://crane.dev/examples/quick-start.html
checks = {
continuwuity-all-features-build = self'.packages.continuwuity-all-features-bin;
continuwuity-all-features-clippy = uwulib.build.craneLibForChecks.cargoClippy (
commonAttrs
// {
inherit cargoArtifacts;
cargoClippyExtraArgs = "-- --deny warnings";
}
);
continuwuity-all-features-docs = uwulib.build.craneLibForChecks.cargoDoc (
commonAttrs
// {
inherit cargoArtifacts;
# This can be commented out or tweaked as necessary, e.g. set to
# `--deny rustdoc::broken-intra-doc-links` to only enforce that lint
env.RUSTDOCFLAGS = "--deny warnings";
}
);
# Check formatting
continuwuity-all-features-fmt = uwulib.build.craneLibForChecks.cargoFmt {
src = uwulib.build.src;
};
continuwuity-all-features-toml-fmt = uwulib.build.craneLibForChecks.taploFmt {
src = pkgs.lib.sources.sourceFilesBySuffices uwulib.build.src [ ".toml" ];
# taplo arguments can be further customized below as needed
taploExtraArgs = "--config ${inputs.self}/taplo.toml";
};
# Audit dependencies
continuwuity-all-features-audit = uwulib.build.craneLibForChecks.cargoAudit {
inherit (inputs) advisory-db;
src = uwulib.build.src;
};
# Audit licenses
continuwuity-all-features-deny = uwulib.build.craneLibForChecks.cargoDeny {
src = uwulib.build.src;
};
# Run tests with cargo-nextest
# Consider setting `doCheck = false` on `continuwuity-all-features` if you do not want
# the tests to run twice
continuwuity-all-features-nextest = uwulib.build.craneLibForChecks.cargoNextest (
commonAttrs
// {
inherit cargoArtifacts;
partitions = 1;
partitionType = "count";
cargoNextestPartitionsExtraArgs = "--no-tests=pass";
}
);
};
};
}

View File

@@ -1,11 +0,0 @@
{
imports = [
./checks
./packages
./shells
./tests
./hydra.nix
./fmt.nix
];
}

View File

@@ -1,37 +0,0 @@
{ inputs, ... }:
{
# load the flake module from upstream
imports = [ inputs.treefmt-nix.flakeModule ];
perSystem =
{ self', lib, ... }:
{
treefmt = {
# repo root as project root
projectRoot = inputs.self;
# the formatters
programs = {
nixfmt.enable = true;
typos = {
enable = true;
configFile = "${inputs.self}/.typos.toml";
};
taplo = {
enable = true;
settings = lib.importTOML "${inputs.self}/taplo.toml";
};
};
settings.formatter.rustfmt = {
command = "${lib.getExe' self'.packages.dev-toolchain "rustfmt"}";
includes = [ "**/*.rs" ];
options = [
"--unstable-features"
"--edition=2024"
"--config-path=${inputs.self}/rustfmt.toml"
];
};
};
};
}

View File

@@ -1,9 +0,0 @@
{ inputs, ... }:
let
lib = inputs.nixpkgs.lib;
in
{
flake.hydraJobs.packages = builtins.mapAttrs (
_name: lib.hydraJob
) inputs.self.packages.x86_64-linux;
}

View File

@@ -1,60 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
in
{
packages =
lib.pipe
[
# this is the default variant
{
variantName = "default";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb;
features = { };
}
# this is the variant with all features enabled (liburing + jemalloc)
{
variantName = "all-features";
commonAttrsArgs.profile = "release";
rocksdb = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
features = {
enabledFeatures = "all";
disabledFeatures = uwulib.features.defaultDisabledFeatures ++ [ "bindgen-static" ];
};
}
]
[
(builtins.map (cfg: rec {
deps = {
name = "continuwuity-${cfg.variantName}-deps";
value = uwulib.build.buildDeps {
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
bin = {
name = "continuwuity-${cfg.variantName}-bin";
value = uwulib.build.buildPackage {
deps = self'.packages.${deps.name};
features = uwulib.features.calcFeatures cfg.features;
inherit (cfg) commonAttrsArgs rocksdb;
};
};
}))
(builtins.concatMap builtins.attrValues)
builtins.listToAttrs
];
};
}

View File

@@ -1,14 +0,0 @@
{
imports = [
./continuwuity
./rocksdb
./rust.nix
./uwulib
];
perSystem =
{ self', ... }:
{
packages.default = self'.packages.continuwuity-default-bin;
};
}

View File

@@ -1,12 +0,0 @@
{
perSystem =
{
pkgs,
...
}:
{
packages = {
rocksdb = pkgs.callPackage ./package.nix { };
};
};
}

View File

@@ -1,88 +0,0 @@
{
lib,
stdenv,
rocksdb,
liburing,
rust-jemalloc-sys-unprefixed,
enableJemalloc ? false,
enableLiburing ? false,
fetchFromGitea,
...
}:
let
notDarwin = !stdenv.hostPlatform.isDarwin;
in
(rocksdb.override {
# Override the liburing input for the build with our own so
# we have it built with the library flag
inherit liburing;
jemalloc = rust-jemalloc-sys-unprefixed;
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = enableJemalloc && notDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
enableLiburing = enableLiburing && notDarwin;
}).overrideAttrs
(old: {
src = fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.5.fb";
sha256 = "sha256-X4ApGLkHF9ceBtBg77dimEpu720I79ffLoyPa8JMHaU=";
};
version = "10.5.fb";
cmakeFlags =
lib.subtractLists (builtins.map (flag: lib.cmakeBool flag true) [
# No real reason to have snappy or zlib, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
# This doesn't exist in RocksDB, and USE_SSE is deprecated for
# PORTABLE=$(march)
"FORCE_SSE42"
]) old.cmakeFlags
++ (builtins.map (flag: lib.cmakeBool flag false) [
# No real reason to have snappy, no one uses this
"WITH_SNAPPY"
"ZLIB"
"WITH_ZLIB"
# We don't need to use ldb or sst_dump (core_tools)
"WITH_CORE_TOOLS"
# We don't need trace tools
"WITH_TRACE_TOOLS"
# We don't need to build rocksdb tests
"WITH_TESTS"
# We use rust-rocksdb via C interface and don't need C++ RTTI
"USE_RTTI"
]);
enableLiburing = enableLiburing && notDarwin;
# outputs has "tools" which we don't need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we don't need or use
preInstall = "";
# We have this already at https://forgejo.ellis.link/continuwuation/rocksdb/commit/a935c0273e1ba44eacf88ce3685a9b9831486155
# Unsetting `patches` so we don't have to revert it and make this nix exclusive
patches = [ ];
})

View File

@@ -1,32 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
system,
lib,
...
}:
{
packages =
let
fnx = inputs.fenix.packages.${system};
stable = fnx.fromToolchainFile {
file = inputs.self + "/rust-toolchain.toml";
# See also `rust-toolchain.toml`
sha256 = "sha256-SJwZ8g0zF2WrKDVmHrVG3pD2RGoQeo24MEXnNx5FyuI=";
};
in
{
# used for building nix stuff (doesn't include rustfmt overhead)
build-toolchain = stable;
# used for dev shells
dev-toolchain = fnx.combine [
stable
# use the nightly rustfmt because we use nightly features
fnx.complete.rustfmt
];
};
};
}

View File

@@ -1,108 +0,0 @@
args@{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
uwuenv = import ./environment.nix args;
selfpkgs = inputs.self.packages.${pkgs.stdenv.system};
in
rec {
# basic, very minimal instance of the crane library with a minimal rust toolchain
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.build-toolchain);
# the checks require more rust toolchain components, hence we have this separate instance of the crane library
craneLibForChecks = (inputs.crane.mkLib pkgs).overrideToolchain (_: selfpkgs.dev-toolchain);
# meta information (name, version, etc) of the rust crate based on the Cargo.toml
crateInfo = craneLib.crateNameFromCargoToml { cargoToml = "${inputs.self}/Cargo.toml"; };
src =
let
# see https://crane.dev/API.html#cranelibfiltercargosources
#
# we need to keep the `web` directory which would be filtered out by the regular source filtering function
#
# https://crane.dev/API.html#cranelibcleancargosource
isWebTemplate = path: _type: builtins.match ".*src/web.*" path != null;
isRust = craneLib.filterCargoSources;
isNix = path: _type: builtins.match ".+/nix.*" path != null;
webOrRustNotNix = p: t: !(isNix p t) && (isWebTemplate p t || isRust p t);
in
lib.cleanSourceWith {
src = inputs.self;
filter = webOrRustNotNix;
name = "source";
};
# common attrs that are shared between building continuwuity's deps and the package itself
commonAttrs =
{
profile ? "dev",
...
}:
{
inherit (crateInfo)
pname
version
;
inherit src;
# this prevents unnecessary rebuilds
strictDeps = true;
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
doCheck = true;
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgs.rustPlatform.bindgenHook
];
};
makeRocksDBEnv =
{ rocksdb }:
{
ROCKSDB_INCLUDE_DIR = "${rocksdb}/include";
ROCKSDB_LIB_DIR = "${rocksdb}/lib";
};
# function that builds the continuwuity dependencies derivation
buildDeps =
{
rocksdb,
features,
commonAttrsArgs,
}:
craneLib.buildDepsOnly (
(commonAttrs commonAttrsArgs)
// {
env = uwuenv.buildDepsOnlyEnv // (makeRocksDBEnv { inherit rocksdb; });
inherit (features) cargoExtraArgs;
}
);
# function that builds the continuwuity package
buildPackage =
{
deps,
rocksdb,
features,
commonAttrsArgs,
}:
let
rocksdbEnv = makeRocksDBEnv { inherit rocksdb; };
in
craneLib.buildPackage (
(commonAttrs commonAttrsArgs)
// {
cargoArtifacts = deps;
doCheck = true;
env = uwuenv.buildPackageEnv // rocksdbEnv;
passthru.env = uwuenv.buildPackageEnv // rocksdbEnv;
meta.mainProgram = crateInfo.pname;
inherit (features) cargoExtraArgs;
}
);
}

View File

@@ -1,10 +0,0 @@
{ inputs, ... }:
{
flake.uwulib = {
init = pkgs: {
features = import ./features.nix { inherit pkgs inputs; };
environment = import ./environment.nix { inherit pkgs inputs; };
build = import ./build.nix { inherit pkgs inputs; };
};
};
}

View File

@@ -1,18 +0,0 @@
args@{ pkgs, inputs, ... }:
let
uwubuild = import ./build.nix args;
in
rec {
buildDepsOnlyEnv = {
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = "release";
}
// uwubuild.craneLib.mkCrossToolchainEnv (p: pkgs.clangStdenv);
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
}
// buildDepsOnlyEnv;
}

View File

@@ -1,77 +0,0 @@
{ pkgs, inputs, ... }:
let
inherit (pkgs) lib;
in
rec {
defaultDisabledFeatures = [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
# we don't want to enable this feature set by default but be more specific about it
"full"
];
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
calcFeatures =
{
tomlPath ? "${inputs.self}/src/main",
# either a list of feature names or a string "all" which enables all non-default features
enabledFeatures ? [ ],
disabledFeatures ? defaultDisabledFeatures,
default_features ? true,
disable_release_max_log_level ? false,
}:
let
# simple helper to get the contents of a Cargo.toml file in a nix format
getToml = path: lib.importTOML "${path}/Cargo.toml";
# get all the features except for the default features
allFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features)
lib.attrNames
(lib.remove "default")
];
# get just the default enabled features
allDefaultFeatures = lib.pipe tomlPath [
getToml
(manifest: manifest.features.default)
];
# depending on the value of enabledFeatures choose just a set or all non-default features
#
# - [ list of features ] -> choose exactly the features listed
# - "all" -> choose all non-default features
additionalFeatures = if enabledFeatures == "all" then allFeatures else enabledFeatures;
# unification with default features (if enabled)
features = lib.unique (additionalFeatures ++ lib.optionals default_features allDefaultFeatures);
# prepare the features that are subtracted from the set
disabledFeatures' =
disabledFeatures ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
# construct the final feature set
finalFeatures = lib.subtractLists disabledFeatures' features;
in
{
# final feature set, useful for querying it
features = finalFeatures;
# crane flag with the relevant features
cargoExtraArgs = builtins.concatStringsSep " " [
"--no-default-features"
"--locked"
(lib.optionalString (finalFeatures != [ ]) "--features")
(builtins.concatStringsSep "," finalFeatures)
];
};
}

View File

@@ -1,29 +0,0 @@
{ inputs, ... }:
{
perSystem =
{
self',
lib,
pkgs,
...
}:
let
uwulib = inputs.self.uwulib.init pkgs;
rocksdbAllFeatures = self'.packages.rocksdb.override {
enableJemalloc = true;
enableLiburing = true;
};
in
{
# basic nix shell containing all things necessary to build continuwuity in all flavors manually (on x86_64-linux)
devShells.default = uwulib.build.craneLib.devShell {
packages = [
pkgs.pkg-config
pkgs.liburing
pkgs.rust-jemalloc-sys-unprefixed
rocksdbAllFeatures
];
env.LIBCLANG_PATH = lib.makeLibraryPath [ pkgs.llvmPackages.libclang.lib ];
};
};
}

View File

@@ -1,124 +0,0 @@
{
perSystem =
{
self',
lib,
pkgs,
...
}:
{
# run some nixos tests as checks
checks = lib.pipe self'.packages [
# we take all packages (names)
builtins.attrNames
# we keep only the packages that end with `-bin` (which we are interested in for testing)
(builtins.filter (lib.hasSuffix "-bin"))
# for each of these binaries we build the basic nixos test
#
# this test was initially yoinked from
#
# https://github.com/NixOS/nixpkgs/blob/960ce26339661b1b69c6f12b9063ca51b688615f/nixos/tests/matrix/continuwuity.nix
(builtins.map (name: {
name = "test-${name}";
value = pkgs.testers.runNixOSTest {
inherit name;
nodes = {
continuwuity = {
services.matrix-continuwuity = {
enable = true;
package = self'.packages.${name};
settings.global = {
server_name = name;
address = [ "0.0.0.0" ];
allow_registration = true;
yes_i_am_very_very_sure_i_want_an_open_registration_server_prone_to_abuse = true;
};
extraEnvironment.RUST_BACKTRACE = "yes";
};
networking.firewall.allowedTCPPorts = [ 6167 ];
};
client =
{ pkgs, ... }:
{
environment.systemPackages = [
(pkgs.writers.writePython3Bin "do_test" { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
import asyncio
import nio


async def main() -> None:
    # Connect to continuwuity
    client = nio.AsyncClient("http://continuwuity:6167", "alice")
    # Register as user alice
    response = await client.register("alice", "my-secret-password")
    # Log in as user alice
    response = await client.login("my-secret-password")
    # Create a new room
    response = await client.room_create(federate=False)
    print("Matrix room create response:", response)
    assert isinstance(response, nio.RoomCreateResponse)
    room_id = response.room_id
    # Join the room
    response = await client.join(room_id)
    print("Matrix join response:", response)
    assert isinstance(response, nio.JoinResponse)
    # Send a message to the room
    response = await client.room_send(
        room_id=room_id,
        message_type="m.room.message",
        content={
            "msgtype": "m.text",
            "body": "Hello continuwuity!"
        }
    )
    print("Matrix room send response:", response)
    assert isinstance(response, nio.RoomSendResponse)
    # Sync responses
    response = await client.sync(timeout=30000)
    print("Matrix sync response:", response)
    assert isinstance(response, nio.SyncResponse)
    # Check the message was received by continuwuity
    last_message = response.rooms.join[room_id].timeline.events[-1].body
    assert last_message == "Hello continuwuity!"
    # Leave the room
    response = await client.room_leave(room_id)
    print("Matrix room leave response:", response)
    assert isinstance(response, nio.RoomLeaveResponse)
    # Close the client
    await client.close()


if __name__ == "__main__":
    asyncio.run(main())
'')
];
};
};
testScript = ''
start_all()

with subtest("start continuwuity"):
    continuwuity.wait_for_unit("continuwuity.service")
    continuwuity.wait_for_open_port(6167)

with subtest("ensure messages can be exchanged"):
    client.succeed("do_test >&2")
'';
};
}))
builtins.listToAttrs
];
};
}

View File

@@ -12,14 +12,13 @@ Group=conduwuit
Type=notify-reload
ReloadSignal=SIGUSR1
Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"
Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=%S/conduwuit"
Environment="CONTINUWUITY_CONFIG_RELOAD_SIGNAL=true"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"
LoadCredential=conduwuit.toml:/etc/conduwuit/conduwuit.toml
ExecStart=/usr/bin/conduwuit --config ${CREDENTIALS_DIRECTORY}/conduwuit.toml
ExecStart=/usr/bin/conduwuit
AmbientCapabilities=
CapabilityBoundingSet=
@@ -53,9 +52,8 @@ SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
# ConfigurationDirectory isn't specified here because it's created by
# the distro's package manager.
StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750
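
After editing or upgrading the unit file, a standard systemd reload cycle applies the changes; a minimal sketch (generic systemd workflow, not specific to this package):

```bash
# Pick up the edited unit file and restart the service
sudo systemctl daemon-reload
sudo systemctl restart conduwuit

# Follow the logs to confirm a clean start
sudo journalctl -u conduwuit -f
```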

View File

@@ -1,29 +1,13 @@
# Continuwuity for Debian
This document provides information about downloading and deploying the Debian package. You can also use this guide for other deb-based distributions such as Ubuntu.
This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.
### Installation
To add the Continuwuation apt repository:
```bash
# Replace with `"dev"` for bleeding-edge builds at your own risk
export COMPONENT="stable"
# Import the Continuwuation signing key
sudo curl https://forgejo.ellis.link/api/packages/continuwuation/debian/repository.key -o /etc/apt/keyrings/forgejo-continuwuation.asc
# Add a new apt source list pointing to the repository
echo "deb [signed-by=/etc/apt/keyrings/forgejo-continuwuation.asc] https://forgejo.ellis.link/api/packages/continuwuation/debian $(lsb_release -sc) $COMPONENT" | sudo tee /etc/apt/sources.list.d/continuwuation.list
# Update remote package lists
sudo apt update
```
To install continuwuity:
```bash
sudo apt install continuwuity
```
The `continuwuity` package conflicts with the old `conduwuit` package and will remove it automatically when installed.
See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
No `apt` repository is currently available. This feature is in development.
### Configuration
After installation, Continuwuity places the example configuration at `/etc/conduwuit/conduwuit.toml` as the default configuration file. The configuration file indicates which settings you must change before starting the service.
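A minimal first-start pass might look like the following sketch (it assumes `server_name` is among the settings the file asks you to change, which is typical for a Matrix homeserver):

```bash
# Edit the default configuration before the first start
sudoedit /etc/conduwuit/conduwuit.toml   # e.g. set server_name

# Enable and start the service once configured
sudo systemctl enable --now conduwuit
```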
@@ -32,7 +16,7 @@ ### Configuration
### Running
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/bin/conduwuit`.
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.
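
Before wiring up the reverse proxy, it can help to confirm that the server answers locally; the `/_matrix/client/versions` endpoint is part of the Matrix spec and needs no authentication:

```bash
# Should return a JSON object listing supported Matrix spec versions
curl http://localhost:6167/_matrix/client/versions
```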

View File

@@ -1,5 +1,6 @@
# This should be run using rpkg: https://docs.pagure.org/rpkg
# This should be run using rpkg-util: https://docs.pagure.org/rpkg-util
# it requires Internet access and is not suitable for Fedora main repos
# TODO: rpkg-util is no longer maintained, find a replacement
Name: continuwuity
Version: {{{ git_repo_version }}}
@@ -51,7 +52,7 @@ find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +
%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml
%files
%license LICENSE
@@ -60,7 +61,7 @@ install -Dpm0600 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/con
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config(noreplace) %{_sysconfdir}/conduwuit/conduwuit.toml
%config %{_sysconfdir}/conduwuit/conduwuit.toml
%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service

View File

@@ -0,0 +1,83 @@
{ lib
, pkgsBuildHost
, rust
, stdenv
}:
lib.optionalAttrs stdenv.hostPlatform.isStatic
{
ROCKSDB_STATIC = "";
}
//
{
CARGO_BUILD_RUSTFLAGS =
lib.concatStringsSep
" "
(lib.optionals
stdenv.hostPlatform.isStatic
[ "-C" "relocation-model=static" ]
++ lib.optionals
(stdenv.buildPlatform.config != stdenv.hostPlatform.config)
[
"-l"
"c"
"-l"
"stdc++"
"-L"
"${stdenv.cc.cc.lib}/${stdenv.hostPlatform.config}/lib"
]
);
}
# What follows is stolen from [here][0]. Its purpose is to properly
# configure compilers and linkers for various stages of the build, and
# even covers the case of build scripts that need native code compiled and
# run on the build platform (I think).
#
# [0]: https://github.com/NixOS/nixpkgs/blob/nixpkgs-unstable/pkgs/build-support/rust/lib/default.nix#L48-L68
//
(
let
inherit (rust.lib) envVars;
in
lib.optionalAttrs
(stdenv.targetPlatform.rust.rustcTarget
!= stdenv.hostPlatform.rust.rustcTarget)
(
let
inherit (stdenv.targetPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForTarget;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForTarget;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForTarget;
}
)
//
(
let
inherit (stdenv.hostPlatform.rust) cargoEnvVarTarget rustcTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForHost;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForHost;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForHost;
CARGO_BUILD_TARGET = rustcTarget;
}
)
//
(
let
inherit (stdenv.buildPlatform.rust) cargoEnvVarTarget;
in
{
"CC_${cargoEnvVarTarget}" = envVars.ccForBuild;
"CXX_${cargoEnvVarTarget}" = envVars.cxxForBuild;
"CARGO_TARGET_${cargoEnvVarTarget}_LINKER" = envVars.ccForBuild;
HOST_CC = "${pkgsBuildHost.stdenv.cc}/bin/cc";
HOST_CXX = "${pkgsBuildHost.stdenv.cc}/bin/c++";
}
)
)

View File

@@ -0,0 +1,224 @@
# Dependencies (keep sorted)
{ craneLib
, inputs
, jq
, lib
, libiconv
, liburing
, pkgsBuildHost
, rocksdb
, removeReferencesTo
, rust
, rust-jemalloc-sys
, stdenv
# Options (keep sorted)
, all_features ? false
, default_features ? true
# default list of disabled features
, disable_features ? [
# dont include experimental features
"experimental"
# jemalloc profiling/stats features are expensive and shouldn't
# be expected on non-debug builds.
"jemalloc_prof"
"jemalloc_stats"
# this is non-functional on nix for some reason
"hardened_malloc"
# conduwuit_mods is a development-only hot reload feature
"conduwuit_mods"
]
, disable_release_max_log_level ? false
, features ? [ ]
, profile ? "release"
# rocksdb compiled with -march=haswell and target-cpu=haswell rustflag
# haswell is pretty much any x86 cpu made in the last 12 years, and
# supports modern CPU extensions that rocksdb can make use of.
# disable if trying to make a portable x86_64 build for very old hardware
, x86_64_haswell_target_optimised ? false
}:
let
# We perform default-feature unification in nix, because some of the dependencies
# on the nix side depend on feature values.
crateFeatures = path:
let manifest = lib.importTOML "${path}/Cargo.toml"; in
lib.remove "default" (lib.attrNames manifest.features);
crateDefaultFeatures = path:
(lib.importTOML "${path}/Cargo.toml").features.default;
allDefaultFeatures = crateDefaultFeatures "${inputs.self}/src/main";
allFeatures = crateFeatures "${inputs.self}/src/main";
features' = lib.unique
(features ++
lib.optionals default_features allDefaultFeatures ++
lib.optionals all_features allFeatures);
disable_features' = disable_features ++ lib.optionals disable_release_max_log_level [ "release_max_log_level" ];
features'' = lib.subtractLists disable_features' features';
featureEnabled = feature: builtins.elem feature features'';
enableLiburing = featureEnabled "io_uring" && !stdenv.hostPlatform.isDarwin;
# This derivation will set the JEMALLOC_OVERRIDE variable, causing the
# tikv-jemalloc-sys crate to use the nixpkgs jemalloc instead of building its
# own. In order for this to work, we need to set flags on the build that match
# whatever flags tikv-jemalloc-sys was going to use. These are dependent on
# which features we enable in tikv-jemalloc-sys.
rust-jemalloc-sys' = (rust-jemalloc-sys.override {
# tikv-jemalloc-sys/unprefixed_malloc_on_supported_platforms feature
unprefixed = true;
}).overrideAttrs (old: {
configureFlags = old.configureFlags ++
# we dont need docs
[ "--disable-doc" ] ++
# we dont need cxx/C++ integration
[ "--disable-cxx" ] ++
# tikv-jemalloc-sys/profiling feature
lib.optional (featureEnabled "jemalloc_prof") "--enable-prof" ++
# tikv-jemalloc-sys/stats feature
(if (featureEnabled "jemalloc_stats") then [ "--enable-stats" ] else [ "--disable-stats" ]);
});
buildDepsOnlyEnv =
let
rocksdb' = (rocksdb.override {
jemalloc = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys';
# rocksdb fails to build with prefixed jemalloc, which is required on
# darwin due to [1]. In this case, fall back to building rocksdb with
# libc malloc. This should not cause conflicts, because all of the
# jemalloc symbols are prefixed.
#
# [1]: https://github.com/tikv/jemallocator/blob/ab0676d77e81268cd09b059260c75b38dbef2d51/jemalloc-sys/src/env.rs#L17
enableJemalloc = featureEnabled "jemalloc" && !stdenv.hostPlatform.isDarwin;
# for some reason enableLiburing in nixpkgs rocksdb is default true
# which breaks Darwin entirely
inherit enableLiburing;
}).overrideAttrs (old: {
inherit enableLiburing;
cmakeFlags = (if x86_64_haswell_target_optimised then
(lib.subtractLists [
# dont make a portable build if x86_64_haswell_target_optimised is enabled
"-DPORTABLE=1"
]
old.cmakeFlags
++ [ "-DPORTABLE=haswell" ]) else [ "-DPORTABLE=1" ]
)
++ old.cmakeFlags;
# outputs has "tools" which we dont need or use
outputs = [ "out" ];
# preInstall hooks has stuff for messing with ldb/sst_dump which we dont need or use
preInstall = "";
});
in
{
# https://crane.dev/faq/rebuilds-bindgen.html
NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa";
CARGO_PROFILE = profile;
ROCKSDB_INCLUDE_DIR = "${rocksdb'}/include";
ROCKSDB_LIB_DIR = "${rocksdb'}/lib";
}
//
(import ./cross-compilation-env.nix {
# Keep sorted
inherit
lib
pkgsBuildHost
rust
stdenv;
});
buildPackageEnv = {
GIT_COMMIT_HASH = inputs.self.rev or inputs.self.dirtyRev or "";
GIT_COMMIT_HASH_SHORT = inputs.self.shortRev or inputs.self.dirtyShortRev or "";
} // buildDepsOnlyEnv // {
# Only needed in static stdenv because these are transitive dependencies of rocksdb
CARGO_BUILD_RUSTFLAGS = buildDepsOnlyEnv.CARGO_BUILD_RUSTFLAGS
+ lib.optionalString (enableLiburing && stdenv.hostPlatform.isStatic)
" -L${lib.getLib liburing}/lib -luring"
+ lib.optionalString x86_64_haswell_target_optimised
" -Ctarget-cpu=haswell";
};
commonAttrs = {
inherit
(craneLib.crateNameFromCargoToml {
cargoToml = "${inputs.self}/Cargo.toml";
})
pname
version;
src = let filter = inputs.nix-filter.lib; in filter {
root = inputs.self;
# Keep sorted
include = [
".cargo"
"Cargo.lock"
"Cargo.toml"
"src"
"xtask"
];
};
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
dontStrip = profile == "dev" || profile == "test";
dontPatchELF = profile == "dev" || profile == "test";
buildInputs = lib.optional (featureEnabled "jemalloc") rust-jemalloc-sys'
# needed to build Rust applications on macOS
++ lib.optionals stdenv.hostPlatform.isDarwin [
# https://github.com/NixOS/nixpkgs/issues/206242
# ld: library not found for -liconv
libiconv
# https://stackoverflow.com/questions/69869574/properly-adding-darwin-apple-sdk-to-a-nix-shell
# https://discourse.nixos.org/t/compile-a-rust-binary-on-macos-dbcrossbar/8612
pkgsBuildHost.darwin.apple_sdk.frameworks.Security
];
nativeBuildInputs = [
# bindgen needs the build platform's libclang. Apparently due to "splicing
# weirdness", pkgs.rustPlatform.bindgenHook on its own doesn't quite do the
# right thing here.
pkgsBuildHost.rustPlatform.bindgenHook
# We don't actually depend on `jq`, but crane's `buildPackage` does, but
# its `buildDepsOnly` doesn't. This causes those two derivations to have
# differing values for `NIX_CFLAGS_COMPILE`, which contributes to spurious
# rebuilds of bindgen and its dependents.
jq
];
};
in
craneLib.buildPackage (commonAttrs // {
cargoArtifacts = craneLib.buildDepsOnly (commonAttrs // {
env = buildDepsOnlyEnv;
});
doCheck = true;
cargoExtraArgs = "--no-default-features --locked "
+ lib.optionalString
(features'' != [ ])
"--features " + (builtins.concatStringsSep "," features'');
env = buildPackageEnv;
passthru = {
env = buildPackageEnv;
};
meta.mainProgram = commonAttrs.pname;
})

View File

@@ -10,19 +10,15 @@
"nix": {
"enabled": true
},
"pre-commit": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"rustyline-async",
"event-listener",
"async-channel",
"core_affinity",
"hyper-util"
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
@@ -68,8 +64,12 @@
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/renovatebot/renovate"],
"automerge": true,
"automergeStrategy": "fast-forward",
"extends": ["schedule:earlyMondays"]
"automergeStrategy": "fast-forward"
},
{
"description": "Group lockfile updates into a single PR",
"matchUpdateTypes": ["lockFileMaintenance"],
"groupName": "lockfile-maintenance"
}
],
"customManagers": [
@@ -81,7 +81,7 @@
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s+(?:(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_CHECKSUM[ =][\"']?(?<currentDigest>.+?)[\"']?\\s)?"
"# renovate: datasource=(?<datasource>[a-z-.]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV|ARG)\\s+[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s"
]
}
]

@@ -10,7 +10,7 @@
[toolchain]
profile = "minimal"
channel = "1.90.0"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",

@@ -85,7 +85,7 @@ futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true

@@ -16,7 +16,7 @@ pub(super) async fn register(&self) -> Result {
let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_saphyr::from_str(&appservice_config_body);
let parsed_config = serde_yml::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
@@ -57,7 +57,7 @@ pub(super) async fn show_appservice_config(&self, appservice_identifier: String)
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_saphyr::to_string(&config)?;
let config_str = serde_yml::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
}
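For reference, a minimal round-trip of the two serde_saphyr calls used above. This is a sketch: `Registration` is a simplified stand-in for the real registration type, and only the `from_str`/`to_string` calls are taken from the handlers.

use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct Registration {
    id: String,
    url: String,
}

fn main() {
    let yaml = "id: my-bridge\nurl: http://localhost:8009";
    // Parse, as `register` does with the command body.
    let parsed: Registration = serde_saphyr::from_str(yaml).expect("valid YAML");
    // Re-serialise, as `show_appservice_config` does for display.
    let rendered = serde_saphyr::to_string(&parsed).expect("serializable");
    println!("{rendered}");
}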

@@ -2,8 +2,7 @@
use conduwuit::{
Err, Result, debug, debug_info, debug_warn, error, info, trace,
utils::time::{TimeDirection, parse_timepoint_ago},
warn,
utils::time::parse_timepoint_ago, warn,
};
use conduwuit_service::media::Dim;
use ruma::{Mxc, OwnedEventId, OwnedMxcUri, OwnedServerName};
@@ -236,19 +235,14 @@ pub(super) async fn delete_past_remote_media(
}
assert!(!(before && after), "--before and --after should not be specified together");
let direction = if after {
TimeDirection::After
} else {
TimeDirection::Before
};
let time_boundary = parse_timepoint_ago(&duration)?;
let duration = parse_timepoint_ago(&duration)?;
let deleted_count = self
.services
.media
.delete_all_media_within_timeframe(
time_boundary,
direction,
.delete_all_remote_media_at_after_time(
duration,
before,
after,
yes_i_want_to_delete_local_media,
)
.await?;

@@ -27,24 +27,12 @@ pub enum MediaCommand {
/// filesystem. This will always ignore errors.
DeleteList,
/// Deletes all remote (and optionally local) media created before/after
/// [duration] ago, using filesystem metadata first created at date, or
/// fallback to last modified date. This will always ignore errors by
/// default.
///
/// * Examples:
/// * Delete all remote media older than a year:
///
/// `!admin media delete-past-remote-media -b 1y`
///
/// * Delete all remote and local media from 3 days ago, up until now:
///
/// `!admin media delete-past-remote-media -a 3d
/// --yes-i-want-to-delete-local-media`
#[command(verbatim_doc_comment)]
/// - Deletes all remote (and optionally local) media created before or
/// after [duration] time using filesystem metadata first created at date,
/// or fallback to last modified date. This will always ignore errors by
/// default.
DeletePastRemoteMedia {
/// - The relative time (e.g. 30s, 5m, 7d) from now within which to
/// search
/// - The relative time (e.g. 30s, 5m, 7d) within which to search
duration: String,
/// - Only delete media created before [duration] ago
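The duration strings accepted here ("30s", "5m", "7d") are parsed by `parse_timepoint_ago`. A hypothetical parser with the same surface behaviour, for illustration only (the real helper lives in conduwuit's utils and may differ):

use std::time::{Duration, SystemTime};

fn parse_ago(s: &str) -> Option<SystemTime> {
    if !s.is_ascii() || s.len() < 2 {
        return None;
    }
    let (num, unit) = s.split_at(s.len() - 1);
    let n: u64 = num.parse().ok()?;
    let secs = match unit {
        "s" => n,
        "m" => n * 60,
        "h" => n * 3_600,
        "d" => n * 86_400,
        "w" => n * 604_800,
        "y" => n * 31_536_000,
        _ => return None,
    };
    SystemTime::now().checked_sub(Duration::from_secs(secs))
}

fn main() {
    // Media on the far side of this boundary would be eligible for deletion.
    assert!(parse_ago("7d").is_some());
    assert!(parse_ago("nonsense").is_none());
}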

@@ -64,14 +64,10 @@ pub(crate) async fn create_content_route(
media_id: &utils::random_string(MXC_LENGTH),
};
if let Err(e) = services
services
.media
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await
{
err!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}
.await?;
let blurhash = body.generate_blurhash.then(|| {
services

@@ -4,14 +4,11 @@
Err, Result, debug_error, err, info,
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
};
use futures::FutureExt;
use futures::{FutureExt, join};
use ruma::{
OwnedServerName, RoomId, UserId,
api::{client::membership::invite_user, federation::membership::create_invite},
events::{
invite_permission_config::FilterLevel,
room::member::{MembershipState, RoomMemberEventContent},
},
events::room::member::{MembershipState, RoomMemberEventContent},
};
use service::Services;
@@ -50,21 +47,22 @@ pub(crate) async fn invite_user_route(
.await?;
match &body.recipient {
| invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
let sender_filter_level = services
.users
.invite_filter_level(recipient_user, sender_user)
.await;
| invite_user::v3::InvitationRecipient::UserId { user_id } => {
let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id);
let recipient_ignored_by_sender =
services.users.user_is_ignored(user_id, sender_user);
if !matches!(sender_filter_level, FilterLevel::Allow) {
// drop invites if the sender has the recipient filtered
let (sender_ignored_recipient, recipient_ignored_by_sender) =
join!(sender_ignored_recipient, recipient_ignored_by_sender);
if sender_ignored_recipient {
return Ok(invite_user::v3::Response {});
}
if let Ok(target_user_membership) = services
.rooms
.state_accessor
.get_member(&body.room_id, recipient_user)
.get_member(&body.room_id, user_id)
.await
{
if target_user_membership.membership == MembershipState::Ban {
@@ -72,27 +70,16 @@ pub(crate) async fn invite_user_route(
}
}
// check for blocked invites if the recipient is a local user.
if services.globals.user_is_local(recipient_user) {
let recipient_filter_level = services
.users
.invite_filter_level(sender_user, recipient_user)
.await;
// ignored invites aren't handled here
// since the recipient's membership should still be changed to `invite`.
// they're filtered out in the individual /sync handlers.
if matches!(recipient_filter_level, FilterLevel::Block) {
return Err!(Request(InviteBlocked(
"{recipient_user} has blocked invites from you."
)));
}
if recipient_ignored_by_sender {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
return Ok(invite_user::v3::Response {});
}
invite_helper(
&services,
sender_user,
recipient_user,
user_id,
&body.room_id,
body.reason.clone(),
false,
@@ -111,7 +98,7 @@ pub(crate) async fn invite_user_route(
pub(crate) async fn invite_helper(
services: &Services,
sender_user: &UserId,
recipient_user: &UserId,
user_id: &UserId,
room_id: &RoomId,
reason: Option<String>,
is_direct: bool,
@@ -124,12 +111,12 @@ pub(crate) async fn invite_helper(
return Err!(Request(Forbidden("Invites are not allowed on this server.")));
}
if !services.globals.user_is_local(recipient_user) {
if !services.globals.user_is_local(user_id) {
let (pdu, pdu_json, invite_room_state) = {
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
avatar_url: services.users.avatar_url(user_id).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -139,7 +126,7 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(recipient_user.to_string(), &content),
PduBuilder::state(user_id.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,
@@ -157,7 +144,7 @@ pub(crate) async fn invite_helper(
let response = services
.sending
.send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
.send_federation_request(user_id.server_name(), create_invite::v2::Request {
room_id: room_id.to_owned(),
event_id: (*pdu.event_id).to_owned(),
room_version: room_version_id.clone(),
@@ -186,7 +173,7 @@ pub(crate) async fn invite_helper(
return Err!(Request(BadJson(warn!(
%pdu.event_id, %event_id,
"Server {} sent event with wrong event ID",
recipient_user.server_name()
user_id.server_name()
))));
}
@@ -226,9 +213,9 @@ pub(crate) async fn invite_helper(
let state_lock = services.rooms.state.mutex.lock(room_id).await;
let content = RoomMemberEventContent {
displayname: services.users.displayname(recipient_user).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
blurhash: services.users.blurhash(recipient_user).await.ok(),
displayname: services.users.displayname(user_id).await.ok(),
avatar_url: services.users.avatar_url(user_id).await.ok(),
blurhash: services.users.blurhash(user_id).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -238,7 +225,7 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(recipient_user.to_string(), &content),
PduBuilder::state(user_id.to_string(), &content),
sender_user,
Some(room_id),
&state_lock,

@@ -313,14 +313,11 @@ pub async fn join_room_by_id_helper(
}
}
if !server_in_room && servers.is_empty() {
return Err!(Request(NotFound(
"No servers were provided to assist in joining the room remotely, and we are not \
already participating in the room."
)));
}
let local_join = server_in_room
|| servers.is_empty()
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
if server_in_room {
if local_join {
join_room_by_id_helper_local(
services,
sender_user,
@@ -742,7 +739,6 @@ async fn join_room_by_id_helper_local(
.iter()
.stream()
.any(|restriction_room_id| {
trace!("Checking if {sender_user} is joined to {restriction_room_id}");
services
.rooms
.state_cache
@@ -755,7 +751,6 @@ async fn join_room_by_id_helper_local(
.state_cache
.local_users_in_room(room_id)
.filter(|user| {
trace!("Checking if {user} can invite {sender_user} to {room_id}");
services.rooms.state_accessor.user_can_invite(
room_id,
user,
@@ -768,7 +763,6 @@ async fn join_room_by_id_helper_local(
.await
.map(ToOwned::to_owned)
} else {
trace!("No restriction rooms are joined by {sender_user}");
None
}
};
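The `local_join` decision above reduces to a small predicate. A sketch with the service calls flattened to plain values (`our_name` standing in for `services.globals.server_is_ours`):

fn is_local_join(server_in_room: bool, servers: &[&str], our_name: &str) -> bool {
    server_in_room
        || servers.is_empty()
        || (servers.len() == 1 && servers[0] == our_name)
}

fn main() {
    // Already participating: handle the join locally regardless of hints.
    assert!(is_local_join(true, &["matrix.org"], "example.com"));
    // No via servers given: nothing to federate with, try locally.
    assert!(is_local_join(false, &[], "example.com"));
    // The only hinted server is ourselves: also local.
    assert!(is_local_join(false, &["example.com"], "example.com"));
    // A single remote hint: go over federation instead.
    assert!(!is_local_join(false, &["matrix.org"], "example.com"));
}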

@@ -30,7 +30,6 @@
events::{
AnyStateEvent, StateEventType,
TimelineEventType::{self, *},
invite_permission_config::FilterLevel,
},
serde::Raw,
};
@@ -268,7 +267,7 @@ pub(crate) async fn ignored_filter(
pub(crate) async fn is_ignored_pdu<Pdu>(
services: &Services,
event: &Pdu,
recipient_user: &UserId,
user_id: &UserId,
) -> bool
where
Pdu: Event + Send + Sync,
@@ -279,29 +278,20 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
return true;
}
let sender_user = event.sender();
let type_ignored = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
let server_ignored = services
let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
let ignored_server = services
.moderation
.is_remote_server_ignored(sender_user.server_name());
let user_ignored = services
.users
.user_is_ignored(sender_user, recipient_user)
.await;
.is_remote_server_ignored(event.sender().server_name());
if !type_ignored {
// We cannot safely ignore this type
return false;
}
if server_ignored {
// the sender's server is ignored, so ignore this event
return true;
}
if user_ignored && !services.config.send_messages_from_ignored_users_to_client {
// the recipient of this PDU has the sender ignored, and we're not
// configured to send ignored messages to clients
if ignored_type
&& (ignored_server
|| (!services.config.send_messages_from_ignored_users_to_client
&& services
.users
.user_is_ignored(event.sender(), user_id)
.await))
{
return true;
}
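The restructured condition above collapses to one boolean expression. A sketch with the service calls reduced to booleans (`forward_to_client` standing in for the `send_messages_from_ignored_users_to_client` config flag):

fn is_ignored(
    ignored_type: bool,
    ignored_server: bool,
    user_ignored: bool,
    forward_to_client: bool,
) -> bool {
    ignored_type && (ignored_server || (!forward_to_client && user_ignored))
}

fn main() {
    // Ignored sender, forwarding disabled: drop the event.
    assert!(is_ignored(true, false, true, false));
    // The config flag lets ignored users' messages through to clients.
    assert!(!is_ignored(true, false, true, true));
    // Types outside IGNORED_MESSAGE_TYPES are never dropped here.
    assert!(!is_ignored(false, true, true, false));
}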
@@ -330,29 +320,6 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
filter.matches(pdu).then_some(item)
}
#[inline]
pub(crate) async fn is_ignored_invite(
services: &Services,
recipient_user: &UserId,
room_id: &RoomId,
) -> bool {
let Ok(sender_user) = services
.rooms
.state_cache
.invite_sender(recipient_user, room_id)
.await
else {
// the invite may have been sent before the invite_sender table existed.
// assume it's not ignored
return false;
};
services
.users
.invite_filter_level(&sender_user, recipient_user)
.await == FilterLevel::Ignore
}
#[cfg_attr(debug_assertions, ctor::ctor)]
fn _is_sorted() {
debug_assert!(

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, BTreeSet};
use std::collections::BTreeMap;
use axum::extract::State;
use conduwuit::{
@@ -13,7 +13,6 @@
api::client::room::{self, create_room},
events::{
TimelineEventType,
invite_permission_config::FilterLevel,
room::{
canonical_alias::RoomCanonicalAliasEventContent,
create::RoomCreateEventContent,
@@ -122,40 +121,6 @@ pub(crate) async fn create_room_route(
return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
}
let mut invitees = BTreeSet::new();
for recipient_user in &body.invite {
if !matches!(
services
.users
.invite_filter_level(recipient_user, sender_user)
.await,
FilterLevel::Allow
) {
// drop invites if the creator has them blocked
continue;
}
// if the recipient of the invite is local and has the sender blocked, error
// out. if the recipient is remote we can't tell yet, and if they're local and
// have the sender _ignored_ their invite will be filtered out in
// the handlers for the individual /sync endpoints
if services.globals.user_is_local(recipient_user)
&& matches!(
services
.users
.invite_filter_level(sender_user, recipient_user)
.await,
FilterLevel::Block
) {
return Err!(Request(InviteBlocked(
"{recipient_user} has blocked invites from you."
)));
}
invitees.insert(recipient_user.clone());
}
let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
| Some(alias) =>
Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
@@ -287,11 +252,19 @@ pub(crate) async fn create_room_route(
| _ => RoomPreset::PrivateChat, // Room visibility should not be custom
});
let mut power_levels_to_grant = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]);
let mut users = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]);
if preset == RoomPreset::TrustedPrivateChat {
for recipient_user in &invitees {
power_levels_to_grant.insert(recipient_user.clone(), int!(100));
for invite in &body.invite {
if services.users.user_is_ignored(sender_user, invite).await {
continue;
} else if services.users.user_is_ignored(invite, sender_user).await {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
continue;
}
users.insert(invite.clone(), int!(100));
}
}
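The skip logic in this loop (and in the invite loop further down) applies one rule: drop the invite silently when either side has the other ignored. A sketch with `user_is_ignored(user, by)` modelled as set membership rather than the real async service call:

use std::collections::HashSet;

// "Does `by` have `user` ignored?" -- `ignores` holds (ignorer, ignored) pairs.
fn user_is_ignored(ignores: &HashSet<(&str, &str)>, user: &str, by: &str) -> bool {
    ignores.contains(&(by, user))
}

fn keep_invite(ignores: &HashSet<(&str, &str)>, sender: &str, invitee: &str) -> bool {
    !user_is_ignored(ignores, sender, invitee) && !user_is_ignored(ignores, invitee, sender)
}

fn main() {
    let mut ignores = HashSet::new();
    ignores.insert(("@bob:example.com", "@alice:example.com"));
    // Bob has Alice ignored, so Alice's invite to Bob is silently dropped.
    assert!(!keep_invite(&ignores, "@alice:example.com", "@bob:example.com"));
    assert!(keep_invite(&ignores, "@alice:example.com", "@carol:example.com"));
}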
@@ -316,7 +289,7 @@ pub(crate) async fn create_room_route(
}
}
} else {
power_levels_to_grant.insert(sender_user.to_owned(), int!(100));
users.insert(sender_user.to_owned(), int!(100));
creators.clear(); // If this vec is not empty, default_power_levels_content will
// treat this as a v12 room
}
@@ -324,7 +297,7 @@ pub(crate) async fn create_room_route(
let power_levels_content = default_power_levels_content(
body.power_level_content_override.as_ref(),
&body.visibility,
power_levels_to_grant,
users,
creators,
)?;
@@ -486,9 +459,17 @@ pub(crate) async fn create_room_route(
// 8. Events implied by invite (and TODO: invite_3pid)
drop(state_lock);
for recipient_user in &invitees {
for user_id in &body.invite {
if services.users.user_is_ignored(sender_user, user_id).await {
continue;
} else if services.users.user_is_ignored(user_id, sender_user).await {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
continue;
}
if let Err(e) =
invite_helper(&services, sender_user, recipient_user, &room_id, None, body.is_direct)
invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct)
.boxed()
.await
{

@@ -2,7 +2,7 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Event, Result, RoomVersion, debug, err, info,
Err, Error, Event, Result, debug, err, info,
matrix::{StateKey, pdu::PduBuilder},
};
use futures::{FutureExt, StreamExt};
@@ -68,77 +68,37 @@ pub(crate) async fn upgrade_room_route(
return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
}
// First, check if the user has permission to upgrade the room (send tombstone
// event)
let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
// Check tombstone permission by attempting to create (but not send) the event
// Note that this does internally call the policy server with a fake room ID,
// which may not be good?
let tombstone_test_result = services
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: RoomId::new(services.globals.server_name()),
}),
sender_user,
Some(&body.room_id),
&old_room_state_lock,
)
.await;
if let Err(_e) = tombstone_test_result {
return Err!(Request(Forbidden("User does not have permission to upgrade this room.")));
}
drop(old_room_state_lock);
// Create a replacement room
let room_features = RoomVersion::new(&body.new_version)?;
let replacement_room_owned = if !room_features.room_ids_as_hashes {
Some(RoomId::new(services.globals.server_name()))
} else {
None
};
let replacement_room: Option<&RoomId> = replacement_room_owned.as_ref().map(AsRef::as_ref);
let replacement_room_tmp = match replacement_room {
| Some(v) => v,
| None => &RoomId::new(services.globals.server_name()),
};
let replacement_room = RoomId::new(services.globals.server_name());
let _short_id = services
.rooms
.short
.get_or_create_shortroomid(replacement_room_tmp)
.get_or_create_shortroomid(&replacement_room)
.await;
// For pre-v12 rooms, send tombstone before creating replacement room
let tombstone_event_id = if !room_features.room_ids_as_hashes {
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
// Send a m.room.tombstone event to the old room to indicate that it is not
// intended to be used any further
let tombstone_event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.unwrap().to_owned(),
}),
sender_user,
Some(&body.room_id),
&state_lock,
)
.await?;
// Change lock to replacement room
drop(state_lock);
Some(tombstone_event_id)
} else {
None
};
let state_lock = services.rooms.state.mutex.lock(replacement_room_tmp).await;
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
// Send a m.room.tombstone event to the old room to indicate that it is not
// intended to be used any further Fail if the sender does not have the required
// permissions
let tombstone_event_id = services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.clone(),
}),
sender_user,
Some(&body.room_id),
&state_lock,
)
.await?;
// Change lock to replacement room
drop(state_lock);
let state_lock = services.rooms.state.mutex.lock(&replacement_room).await;
// Get the old room creation event
let mut create_event_content: CanonicalJsonObject = services
@@ -151,7 +111,7 @@ pub(crate) async fn upgrade_room_route(
// Use the m.room.tombstone event as the predecessor
let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
body.room_id.clone(),
tombstone_event_id,
Some(tombstone_event_id),
));
// Send a m.room.create event containing a predecessor field and the applicable
@@ -172,7 +132,6 @@ pub(crate) async fn upgrade_room_route(
// "creator" key no longer exists in V11 rooms
create_event_content.remove("creator");
},
// TODO(hydra): additional_creators
}
}
@@ -200,7 +159,7 @@ pub(crate) async fn upgrade_room_route(
return Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"));
}
let create_event_id = services
services
.rooms
.timeline
.build_and_append_pdu(
@@ -214,18 +173,11 @@ pub(crate) async fn upgrade_room_route(
timestamp: None,
},
sender_user,
replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
.await?;
let create_id = create_event_id.as_str().replace('$', "!");
let (replacement_room, state_lock) = if room_features.room_ids_as_hashes {
let parsed_room_id = RoomId::parse(&create_id)?;
(Some(parsed_room_id), services.rooms.state.mutex.lock(parsed_room_id).await)
} else {
(replacement_room, state_lock)
};
// Join the new room
services
@@ -252,7 +204,7 @@ pub(crate) async fn upgrade_room_route(
timestamp: None,
},
sender_user,
replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
@@ -291,7 +243,7 @@ pub(crate) async fn upgrade_room_route(
..Default::default()
},
sender_user,
replacement_room,
Some(&replacement_room),
&state_lock,
)
.boxed()
@@ -316,7 +268,7 @@ pub(crate) async fn upgrade_room_route(
services
.rooms
.alias
.set_alias(alias, replacement_room.unwrap(), sender_user)?;
.set_alias(alias, &replacement_room, sender_user)?;
}
// Get the old room power levels
@@ -358,27 +310,6 @@ pub(crate) async fn upgrade_room_route(
drop(state_lock);
// For v12 rooms, send tombstone AFTER creating replacement room
if room_features.room_ids_as_hashes {
let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
// For v12 rooms, no event reference in predecessor due to cyclic dependency -
// could best effort one maybe?
services
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
body: "This room has been replaced".to_owned(),
replacement_room: replacement_room.unwrap().to_owned(),
}),
sender_user,
Some(&body.room_id),
&old_room_state_lock,
)
.await?;
drop(old_room_state_lock);
}
// Check if the old room has a space parent, and if so, whether we should update
// it (m.space.parent, room_id)
let parents = services
@@ -403,9 +334,8 @@ pub(crate) async fn upgrade_room_route(
continue;
};
debug!(
"Updating space {space_id} child event for room {} to {}",
&body.room_id,
replacement_room.unwrap()
"Updating space {space_id} child event for room {} to {replacement_room}",
&body.room_id
);
// First, drop the space's child event
let state_lock = services.rooms.state.mutex.lock(space_id).await;
@@ -429,10 +359,7 @@ pub(crate) async fn upgrade_room_route(
.await
.ok();
// Now, add a new child event for the replacement room
debug!(
"Adding space child event for room {} in space {space_id}",
replacement_room.unwrap()
);
debug!("Adding space child event for room {replacement_room} in space {space_id}");
services
.rooms
.timeline
@@ -445,7 +372,7 @@ pub(crate) async fn upgrade_room_route(
suggested: child.suggested,
})
.expect("event is valid, we just created it"),
state_key: Some(replacement_room.unwrap().as_str().into()),
state_key: Some(replacement_room.as_str().into()),
..Default::default()
},
sender_user,
@@ -456,15 +383,12 @@ pub(crate) async fn upgrade_room_route(
.await
.ok();
debug!(
"Finished updating space {space_id} child event for room {} to {}",
&body.room_id,
replacement_room.unwrap()
"Finished updating space {space_id} child event for room {} to {replacement_room}",
&body.room_id
);
drop(state_lock);
}
// Return the replacement room id
Ok(upgrade_room::v3::Response {
replacement_room: replacement_room.unwrap().to_owned(),
})
Ok(upgrade_room::v3::Response { replacement_room })
}
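The reordering in this diff exists because a v12 room's ID is derived from its create event (the event ID with `$` swapped for `!`, as the `create_id` line above shows), so a tombstone pointing at the replacement room can only be sent once the create event exists. A toy encoding of that ordering rule, for illustration:

#[derive(Debug, PartialEq)]
enum TombstoneOrder {
    BeforeCreate,
    AfterCreate,
}

fn tombstone_order(room_ids_as_hashes: bool) -> TombstoneOrder {
    if room_ids_as_hashes {
        // v12: the replacement room ID is unknown until the create event exists.
        TombstoneOrder::AfterCreate
    } else {
        // pre-v12: the replacement room ID is minted up front.
        TombstoneOrder::BeforeCreate
    }
}

fn main() {
    assert_eq!(tombstone_order(false), TombstoneOrder::BeforeCreate);
    assert_eq!(tombstone_order(true), TombstoneOrder::AfterCreate);
}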

@@ -44,7 +44,7 @@ pub(crate) async fn get_hierarchy_route(
.as_ref()
.and_then(|s| PaginationToken::from_str(s).ok());
// Should prevent unexpected behaviour in (bad) clients
// Should prevent unexpeded behaviour in (bad) clients
if let Some(ref token) = key {
if token.suggested_only != body.suggested_only || token.max_depth != max_depth {
return Err!(Request(InvalidParam(

@@ -60,10 +60,7 @@
use service::rooms::short::{ShortEventId, ShortStateKey};
use super::{load_timeline, share_encrypted_room};
use crate::{
Ruma, RumaResponse,
client::{ignored_filter, is_ignored_invite},
};
use crate::{Ruma, RumaResponse, client::ignored_filter};
#[derive(Default)]
struct StateChanges {
@@ -241,13 +238,6 @@ pub(crate) async fn build_sync_events(
.rooms
.state_cache
.rooms_invited(sender_user)
.wide_filter_map(async |(room_id, invite_state)| {
if is_ignored_invite(services, sender_user, &room_id).await {
None
} else {
Some((room_id, invite_state))
}
})
.fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move {
let invite_count = services
.rooms

@@ -11,7 +11,6 @@
utils::{
BoolExt, IterStream, ReadyExt, TryFutureExtExt,
math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
stream::WidebandExt,
},
warn,
};
@@ -40,7 +39,7 @@
use super::{load_timeline, share_encrypted_room};
use crate::{
Ruma,
client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite},
client::{DEFAULT_BUMP_TYPES, ignored_filter},
};
type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
@@ -103,13 +102,6 @@ pub(crate) async fn sync_events_v4_route(
.rooms
.state_cache
.rooms_invited(sender_user)
.wide_filter_map(async |(room_id, invite_state)| {
if is_ignored_invite(&services, sender_user, &room_id).await {
None
} else {
Some((room_id, invite_state))
}
})
.map(|r| r.0)
.collect()
.await;

@@ -14,7 +14,6 @@
BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
future::ReadyEqExt,
math::{ruma_from_usize, usize_from_ruma},
stream::WidebandExt,
},
warn,
};
@@ -39,7 +38,7 @@
use super::share_encrypted_room;
use crate::{
Ruma,
client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite, sync::load_timeline},
client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline},
};
type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request);
@@ -107,13 +106,6 @@ pub(crate) async fn sync_events_v5_route(
.rooms
.state_cache
.rooms_invited(sender_user)
.wide_filter_map(async |(room_id, invite_state)| {
if is_ignored_invite(services, sender_user, &room_id).await {
None
} else {
Some((room_id, invite_state))
}
})
.map(|r| r.0)
.collect::<Vec<OwnedRoomId>>();
@@ -320,7 +312,6 @@ async fn handle_lists<'a, Rooms, AllRooms>(
for mut range in ranges {
range.0 = uint!(0);
range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
range.1 = range
.1
.clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));

@@ -59,7 +59,6 @@ pub(crate) async fn get_supported_versions_route(
("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
("org.matrix.msc4155".to_owned(), true), /* invite filtering (https://github.com/matrix-org/matrix-spec-proposals/pull/4155) */
]),
};

@@ -78,7 +78,7 @@ pub(crate) async fn well_known_support(
while let Some(user_id) = stream.next().await {
// Skip server user
if *user_id == services.globals.server_user {
continue;
break;
}
contacts.push(Contact {
role: role_value.clone(),

@@ -226,7 +226,6 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&server::well_known_server)
.ruma_route(&server::get_content_route)
.ruma_route(&server::get_content_thumbnail_route)
.ruma_route(&server::get_edutypes_route)
.route("/_conduwuit/local_user_count", get(client::conduwuit_local_user_count))
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
} else {

@@ -34,19 +34,6 @@ pub(super) async fn from(
let max_body_size = services.server.config.max_request_size;
// Check if the Content-Length header is present and valid; this saves us
// streaming the request into memory
if let Some(content_length) = parts.headers.get(http::header::CONTENT_LENGTH) {
if let Ok(content_length) = content_length
.to_str()
.map(|s| s.parse::<usize>().unwrap_or_default())
{
if content_length > max_body_size {
return Err(err!(Request(TooLarge("Request body too large"))));
}
}
}
let body = axum::body::to_bytes(body, max_body_size)
.await
.map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?;
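The Content-Length guard above rejects an oversized request before buffering anything. A self-contained sketch of the same idea (plain tuples instead of the real header map):

fn too_large(headers: &[(&str, &str)], max: usize) -> bool {
    headers
        .iter()
        .find(|(name, _)| name.eq_ignore_ascii_case("content-length"))
        .and_then(|(_, value)| value.parse::<usize>().ok())
        .is_some_and(|len| len > max)
}

fn main() {
    assert!(too_large(&[("Content-Length", "9000")], 4096));
    assert!(!too_large(&[("Content-Length", "100")], 4096));
    // Missing or malformed headers fall through to the normal bounded read.
    assert!(!too_large(&[], 4096));
}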

@@ -1,19 +0,0 @@
use axum::extract::State;
use conduwuit::Result;
use ruma::api::federation::edutypes::get_edutypes;
use crate::Ruma;
/// # `GET /_matrix/federation/v1/edutypes`
///
/// Lists EDU types we wish to receive
pub(crate) async fn get_edutypes_route(
State(services): State<crate::State>,
_body: Ruma<get_edutypes::unstable::Request>,
) -> Result<get_edutypes::unstable::Response> {
Ok(get_edutypes::unstable::Response {
typing: services.config.allow_incoming_typing,
presence: services.config.allow_incoming_presence,
receipt: services.config.allow_incoming_read_receipts,
})
}

@@ -61,16 +61,13 @@ pub(crate) async fn create_invite_route(
let mut signed_event = utils::to_canonical_object(&body.event)
.map_err(|_| err!(Request(InvalidParam("Invite event is invalid."))))?;
let recipient_user: OwnedUserId = signed_event
let invited_user: OwnedUserId = signed_event
.get("state_key")
.try_into()
.map(UserId::to_owned)
.map_err(|e| err!(Request(InvalidParam("Invalid state_key property: {e}"))))?;
if !services
.globals
.server_is_ours(recipient_user.server_name())
{
if !services.globals.server_is_ours(invited_user.server_name()) {
return Err!(Request(InvalidParam("User does not belong to this homeserver.")));
}
@@ -78,7 +75,7 @@ pub(crate) async fn create_invite_route(
services
.rooms
.event_handler
.acl_check(recipient_user.server_name(), &body.room_id)
.acl_check(invited_user.server_name(), &body.room_id)
.await?;
services
@@ -92,19 +89,18 @@ pub(crate) async fn create_invite_route(
// Add event_id back
signed_event.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.to_string()));
let sender_user: &UserId = signed_event
let sender: &UserId = signed_event
.get("sender")
.try_into()
.map_err(|e| err!(Request(InvalidParam("Invalid sender property: {e}"))))?;
if services.rooms.metadata.is_banned(&body.room_id).await
&& !services.users.is_admin(&recipient_user).await
&& !services.users.is_admin(&invited_user).await
{
return Err!(Request(Forbidden("This room is banned on this homeserver.")));
}
if services.config.block_non_admin_invites && !services.users.is_admin(&recipient_user).await
{
if services.config.block_non_admin_invites && !services.users.is_admin(&invited_user).await {
return Err!(Request(Forbidden("This server does not allow room invites.")));
}
@@ -135,9 +131,9 @@ pub(crate) async fn create_invite_route(
.state_cache
.update_membership(
&body.room_id,
&recipient_user,
&invited_user,
RoomMemberEventContent::new(MembershipState::Invite),
sender_user,
sender,
Some(invite_state),
body.via.clone(),
true,
@@ -145,7 +141,7 @@ pub(crate) async fn create_invite_route(
.await?;
for appservice in services.appservice.read().await.values() {
if appservice.is_user_match(&recipient_user) {
if appservice.is_user_match(&invited_user) {
services
.sending
.send_appservice_request(

@@ -1,6 +1,6 @@
use axum::extract::State;
use conduwuit::{
Err, Error, Result, debug_info, info, matrix::pdu::PduBuilder, utils::IterStream, warn,
Err, Error, Result, debug_info, matrix::pdu::PduBuilder, utils::IterStream, warn,
};
use conduwuit_service::Services;
use futures::StreamExt;
@@ -22,7 +22,6 @@
/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}`
///
/// Creates a join template.
#[tracing::instrument(skip_all, fields(room_id = %body.room_id, user_id = %body.user_id, origin = %body.origin()))]
pub(crate) async fn create_join_event_template_route(
State(services): State<crate::State>,
body: Ruma<prepare_join_event::v1::Request>,
@@ -73,16 +72,11 @@ pub(crate) async fn create_join_event_template_route(
}
let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
let is_invited = services
.rooms
.state_cache
.is_invited(&body.user_id, &body.room_id)
.await;
let join_authorized_via_users_server: Option<OwnedUserId> = {
use RoomVersionId::*;
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) || is_invited {
// room version does not support restricted join rules, or the user is currently
// already invited
if matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) {
// room version does not support restricted join rules
None
} else if user_can_perform_restricted_join(
&services,
@@ -109,10 +103,6 @@ pub(crate) async fn create_join_event_template_route(
.await
.map(ToOwned::to_owned)
else {
info!(
"No local user is able to authorize the join of {} into {}",
&body.user_id, &body.room_id
);
return Err!(Request(UnableToGrantJoin(
"No user on this server is able to assist in joining."
)));
@@ -177,7 +167,6 @@ pub(crate) async fn user_can_perform_restricted_join(
)
.await
else {
// No join rules means there's nothing to authorise (defaults to invite)
return Ok(false);
};

@@ -1,5 +1,4 @@
pub(super) mod backfill;
pub(super) mod edutypes;
pub(super) mod event;
pub(super) mod event_auth;
pub(super) mod get_missing_events;
@@ -24,7 +23,6 @@
pub(super) mod well_known;
pub(super) use backfill::*;
pub(super) use edutypes::*;
pub(super) use event::*;
pub(super) use event_auth::*;
pub(super) use get_missing_events::*;

@@ -92,7 +92,7 @@ ruma.workspace = true
sanitize-filename.workspace = true
serde_json.workspace = true
serde_regex.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
serde.workspace = true
smallvec.workspace = true
smallstr.workspace = true

@@ -1128,23 +1128,6 @@ pub struct Config {
#[serde(default = "true_fn")]
pub rocksdb_bottommost_compression: bool,
/// Compression algorithm for RocksDB's Write-Ahead-Log (WAL).
///
/// At present, only ZSTD compression is supported by RocksDB for WAL
/// compression. Enabling this can reduce WAL size at the expense of some
/// CPU usage during writes.
///
/// The options are:
/// - "none" = No compression
/// - "zstd" = ZSTD compression
///
/// For more information on WAL compression, see:
/// https://github.com/facebook/rocksdb/wiki/WAL-Compression
///
/// default: "zstd"
#[serde(default = "default_rocksdb_wal_compression")]
pub rocksdb_wal_compression: String,
/// Database recovery mode (for RocksDB WAL corruption).
///
/// Use this option when the server reports corruption and refuses to start.
@@ -1727,19 +1710,6 @@ pub struct Config {
#[serde(default)]
pub block_non_admin_invites: bool,
/// Enable or disable making requests to MSC4284 Policy Servers.
/// It is recommended you keep this enabled unless you experience frequent
/// connectivity issues, such as in a restricted networking environment.
#[serde(default = "true_fn")]
pub enable_msc4284_policy_servers: bool,
/// Enable running locally generated events through configured MSC4284
/// policy servers. You may wish to disable this if your server is
/// single-user for a slight speed benefit in some rooms, but otherwise
/// should leave it enabled.
#[serde(default = "true_fn")]
pub policy_server_check_own_events: bool,
/// Allow admins to enter commands in rooms other than "#admins" (admin
/// room) by prefixing your message with "\!admin" or "\\!admin" followed by
/// a normal continuwuity admin command. The reply will be publicly visible
@@ -2471,8 +2441,6 @@ fn default_rocksdb_compression_algo() -> String {
.to_owned()
}
fn default_rocksdb_wal_compression() -> String { "zstd".to_owned() }
/// Default RocksDB compression level is 32767, which is internally read by
/// RocksDB as the default magic number and translated to the library's default
/// compression level as they all differ. See their `kDefaultCompressionLevel`.
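The field and default above follow the usual serde pattern for these config knobs. A reduced sketch, assuming the serde and toml crates are available (the real `Config` struct has many more fields):

use serde::Deserialize;

fn default_rocksdb_wal_compression() -> String { "zstd".to_owned() }

#[derive(Deserialize)]
struct Config {
    #[serde(default = "default_rocksdb_wal_compression")]
    rocksdb_wal_compression: String,
}

fn main() {
    // An empty config file picks up the "zstd" default.
    let cfg: Config = toml::from_str("").unwrap();
    assert_eq!(cfg.rocksdb_wal_compression, "zstd");
}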

@@ -83,9 +83,7 @@ pub enum Error {
#[error(transparent)]
TypedHeader(#[from] axum_extra::typed_header::TypedHeaderRejection),
#[error(transparent)]
YamlDe(#[from] serde_saphyr::Error),
#[error(transparent)]
YamlSer(#[from] serde_saphyr::ser_error::Error),
Yaml(#[from] serde_yml::Error),
// ruma/conduwuit
#[error("Arithmetic operation failed: {0}")]

@@ -73,7 +73,6 @@ pub(super) fn bad_request_code(kind: &ErrorKind) -> StatusCode {
| ThreepidAuthFailed
| UserDeactivated
| ThreepidDenied
| InviteBlocked
| WrongRoomKeysVersion { .. }
| Forbidden { .. } => StatusCode::FORBIDDEN,

@@ -5,17 +5,13 @@
use std::{collections::BTreeMap, sync::OnceLock};
use crate::utils::exchange;
use crate::{SyncMutex, utils::exchange};
/// Raw capture of rustc flags used to build each crate in the project. Informed
/// by rustc_flags_capture macro (one in each crate's mod.rs). This is
/// done during static initialization which is why it's mutex-protected and pub.
/// Should not be written to by anything other than our macro.
///
/// We specifically use a std mutex here because parking_lot cannot be used
/// after thread local storage is destroyed on MacOS.
pub static FLAGS: std::sync::Mutex<BTreeMap<&str, &[&str]>> =
std::sync::Mutex::new(BTreeMap::new());
pub static FLAGS: SyncMutex<BTreeMap<&str, &[&str]>> = SyncMutex::new(BTreeMap::new());
/// Processed list of enabled features across all project crates. This is
/// generated from the data in FLAGS.
@@ -28,7 +24,6 @@ fn init_features() -> Vec<&'static str> {
let mut features = Vec::new();
FLAGS
.lock()
.expect("locked")
.iter()
.for_each(|(_, flags)| append_features(&mut features, flags));
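A minimal model of the registry described above: a std mutex guarding a map that each crate fills during static initialization, flattened later into the feature list. The capture-macro side is omitted and the flag format shown is an assumption.

use std::{collections::BTreeMap, sync::Mutex};

static FLAGS: Mutex<BTreeMap<&'static str, &'static [&'static str]>> =
    Mutex::new(BTreeMap::new());

fn main() {
    // A crate's capture macro would do roughly this at static-init time.
    FLAGS
        .lock()
        .unwrap()
        .insert("conduwuit_core", &["--cfg", "feature=\"jemalloc\""]);

    // Later, flatten the per-crate flags into the enabled feature list.
    let map = FLAGS.lock().unwrap();
    let features: Vec<&str> = map
        .values()
        .flat_map(|flags| flags.iter().copied())
        .filter_map(|flag| flag.strip_prefix("feature=\""))
        .map(|feature| feature.trim_end_matches('"'))
        .collect();
    assert_eq!(features, ["jemalloc"]);
}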

@@ -200,15 +200,11 @@ pub async fn auth_check<E, F, Fut>(
if incoming_event.room_id().is_some() {
let Some(room_id_server_name) = incoming_event.room_id().unwrap().server_name()
else {
warn!("legacy room ID has no server name");
warn!("room ID has no servername");
return Ok(false);
};
if room_id_server_name != sender.server_name() {
warn!(
expected = %sender.server_name(),
received = %room_id_server_name,
"server name of legacy room ID does not match server name of sender"
);
warn!("servername of room ID does not match servername of sender");
return Ok(false);
}
}
@@ -219,12 +215,12 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!("unsupported room version found in m.room.create event");
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
if room_version.room_ids_as_hashes && incoming_event.room_id().is_some() {
warn!("room create event incorrectly claims to have a room ID when it should not");
warn!("room create event incorrectly claims a room ID");
return Ok(false);
}
@@ -233,7 +229,7 @@ pub async fn auth_check<E, F, Fut>(
{
// If content has no creator field, reject
if content.creator.is_none() {
warn!("m.room.create event incorrectly omits 'creator' field");
warn!("no creator field found in m.room.create content");
return Ok(false);
}
}
@@ -286,19 +282,16 @@ pub async fn auth_check<E, F, Fut>(
.room_version
.is_some_and(|v| v.deserialize().is_err())
{
warn!(
create_event_id = %room_create_event.event_id(),
"unsupported room version found in m.room.create event"
);
warn!("invalid room version found in m.room.create event");
return Ok(false);
}
let expected_room_id = room_create_event.room_id_or_hash();
if incoming_event.room_id().expect("event must have a room ID") != expected_room_id {
if incoming_event.room_id().unwrap() != expected_room_id {
warn!(
expected = %expected_room_id,
received = %incoming_event.room_id().unwrap(),
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
incoming_event.room_id().unwrap(),
expected_room_id,
);
@@ -311,15 +304,12 @@ pub async fn auth_check<E, F, Fut>(
.auth_events()
.any(|id| id == room_create_event.event_id());
if room_version.room_ids_as_hashes && claims_create_event {
warn!("event incorrectly references m.room.create event in auth events");
warn!("m.room.create event incorrectly found in auth events");
return Ok(false);
} else if !room_version.room_ids_as_hashes && !claims_create_event {
// If the create event is not referenced in the event's auth events, and this is
// a v11 room, reject
warn!(
missing = %room_create_event.event_id(),
"event incorrectly did not reference an m.room.create in its auth events"
);
warn!("no m.room.create event found in auth events");
return Ok(false);
}
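The two create-event checks above encode a single invariant: an event must reference m.room.create in its auth_events exactly when the room version does not use hashed room IDs. Flattened into a predicate:

fn create_ref_ok(room_ids_as_hashes: bool, claims_create_event: bool) -> bool {
    room_ids_as_hashes != claims_create_event
}

fn main() {
    assert!(create_ref_ok(false, true)); // v11 and earlier: must reference it
    assert!(create_ref_ok(true, false)); // v12: must not
    assert!(!create_ref_ok(true, true)); // v12 event claiming the create: reject
    assert!(!create_ref_ok(false, false)); // pre-v12 without the reference: reject
}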
@@ -328,7 +318,7 @@ pub async fn auth_check<E, F, Fut>(
warn!(
expected = %expected_room_id,
received = %pe.room_id().unwrap(),
"room_id of referenced power levels event does not match that of the m.room.create event"
"room_id of power levels event does not match room_id of m.room.create event"
);
return Ok(false);
}
@@ -342,9 +332,8 @@ pub async fn auth_check<E, F, Fut>(
&& room_create_event.sender().server_name() != incoming_event.sender().server_name()
{
warn!(
sender = %incoming_event.sender(),
create_sender = %room_create_event.sender(),
"room is not federated and event's sender domain does not match create event's sender domain"
"room is not federated and event's sender domain does not match create event's \
sender domain"
);
return Ok(false);
}
@@ -427,6 +416,7 @@ pub async fn auth_check<E, F, Fut>(
&user_for_join_auth_membership,
&room_create_event,
)? {
warn!("membership change not valid for some reason");
return Ok(false);
}
@@ -439,7 +429,7 @@ pub async fn auth_check<E, F, Fut>(
let sender_member_event = match sender_member_event {
| Some(mem) => mem,
| None => {
warn!("sender has no membership event");
warn!("sender not found in room");
return Ok(false);
},
};
@@ -450,7 +440,7 @@ pub async fn auth_check<E, F, Fut>(
!= expected_room_id
{
warn!(
"room_id of incoming event ({}) does not match that of the m.room.create event ({})",
"room_id of incoming event ({}) does not match room_id of m.room.create event ({})",
sender_member_event
.room_id()
.expect("event must have a room ID"),
@@ -463,7 +453,8 @@ pub async fn auth_check<E, F, Fut>(
from_json_str(sender_member_event.content().get())?;
let Some(membership_state) = sender_membership_event_content.membership else {
warn!(
?sender_membership_event_content,
sender_membership_event_content = format!("{sender_membership_event_content:?}"),
event_id = format!("{}", incoming_event.event_id()),
"Sender membership event content missing membership field"
);
return Err(Error::InvalidPdu("Missing membership field".to_owned()));
@@ -471,11 +462,7 @@ pub async fn auth_check<E, F, Fut>(
let membership_state = membership_state.deserialize()?;
if !matches!(membership_state, MembershipState::Join) {
warn!(
%sender,
?membership_state,
"sender cannot send events without being joined to the room"
);
warn!("sender's membership is not join");
return Ok(false);
}
@@ -535,12 +522,7 @@ pub async fn auth_check<E, F, Fut>(
};
if sender_power_level < invite_level {
warn!(
%sender,
has=?sender_power_level,
required=?invite_level,
"sender cannot send invites in this room"
);
warn!("sender's cannot send invites in this room");
return Ok(false);
}
@@ -552,11 +534,7 @@ pub async fn auth_check<E, F, Fut>(
// level, reject If the event has a state_key that starts with an @ and does
// not match the sender, reject.
if !can_send_event(incoming_event, power_levels_event.as_ref(), sender_power_level) {
warn!(
%sender,
event_type=?incoming_event.kind(),
"sender cannot send event"
);
warn!("user cannot send event");
return Ok(false);
}
@@ -601,12 +579,6 @@ pub async fn auth_check<E, F, Fut>(
};
if !check_redaction(room_version, incoming_event, sender_power_level, redact_level)? {
warn!(
%sender,
?sender_power_level,
?redact_level,
"redaction event was not allowed"
);
return Ok(false);
}
}
@@ -615,21 +587,15 @@ pub async fn auth_check<E, F, Fut>(
Ok(true)
}
fn is_creator<EV>(
v: &RoomVersion,
c: &BTreeSet<OwnedUserId>,
ce: &EV,
user_id: &UserId,
have_pls: bool,
) -> bool
fn is_creator<EV>(v: &RoomVersion, c: &BTreeSet<OwnedUserId>, ce: &EV, user_id: &UserId) -> bool
where
EV: Event + Send + Sync,
{
if v.explicitly_privilege_room_creators {
c.contains(user_id)
} else if v.use_room_create_sender && !have_pls {
} else if v.use_room_create_sender {
ce.sender() == user_id
} else if !have_pls {
} else {
#[allow(deprecated)]
let creator = from_json_str::<RoomCreateEventContent>(ce.content().get())
.unwrap()
@@ -638,8 +604,6 @@ fn is_creator<EV>(
.unwrap();
creator == user_id
} else {
false
}
}
@@ -732,11 +696,10 @@ struct GetThirdPartyInvite {
}
trace!(?creators, "creators for room");
let join_rules = if let Some(jr) = &join_rules_event {
from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule
} else {
JoinRule::Invite
};
let mut join_rules = JoinRule::Invite;
if let Some(jr) = &join_rules_event {
join_rules = from_json_str::<RoomJoinRulesEventContent>(jr.content().get())?.join_rule;
}
let power_levels_event_id = power_levels_event.as_ref().map(Event::event_id);
let sender_membership_event_id = sender_membership_event.as_ref().map(Event::event_id);
@@ -762,41 +725,16 @@ struct GetThirdPartyInvite {
(int!(0), int!(0))
};
let user_joined = user_for_join_auth_membership == &MembershipState::Join;
let okay_power = is_creator(
room_version,
&creators,
create_room,
user_for_join_auth,
power_levels_event.as_ref().is_some(),
) || auth_user_pl >= invite_level;
trace!(
auth_user_pl=?auth_user_pl,
invite_level=?invite_level,
user_joined=?user_joined,
okay_power=?okay_power,
passing=?(user_joined && okay_power),
"user for join auth is valid check details"
);
let okay_power = is_creator(room_version, &creators, create_room, user_for_join_auth)
|| auth_user_pl >= invite_level;
user_joined && okay_power
} else {
// No auth user was given
trace!("No auth user given for join auth");
false
};
let sender_creator = is_creator(
room_version,
&creators,
create_room,
sender,
power_levels_event.as_ref().is_some(),
);
let target_creator = is_creator(
room_version,
&creators,
create_room,
target_user,
power_levels_event.as_ref().is_some(),
);
let sender_creator = is_creator(room_version, &creators, create_room, sender);
let target_creator = is_creator(room_version, &creators, create_room, target_user);
Ok(match target_membership {
| MembershipState::Join => {
@@ -813,7 +751,7 @@ struct GetThirdPartyInvite {
if prev_event_is_create_event && no_more_prev_events {
trace!(
%sender,
sender = %sender,
target_user = %target_user,
?sender_creator,
?target_creator,
@@ -833,33 +771,22 @@ struct GetThirdPartyInvite {
);
if sender != target_user {
// If the sender does not match state_key, reject.
warn!(
%sender,
target_user = %target_user,
"sender cannot join on behalf of another user"
);
warn!("Can't make other user join");
false
} else if target_user_current_membership == MembershipState::Ban {
// If the sender is banned, reject.
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
"sender cannot join as they are banned from the room"
);
warn!(?target_user_membership_event_id, "Banned user can't join");
false
} else {
match join_rules {
| JoinRule::Invite =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership = ?target_user_current_membership,
"sender cannot join as they are not invited to the invite-only room"
membership=?target_user_current_membership,
"Join rule is invite but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited to room, allowing join");
true
},
| JoinRule::Knock if !room_version.allow_knocking => {
@@ -869,14 +796,11 @@ struct GetThirdPartyInvite {
| JoinRule::Knock =>
if !membership_allows_join {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
"sender cannot join a knock room without being invited or already joined"
"Join rule is knock but membership does not allow join"
);
false
} else {
trace!(sender=%sender, "sender is invited or already joined to room, allowing join");
true
},
| JoinRule::KnockRestricted(_) if !room_version.knock_restricted_join_rule =>
@@ -887,56 +811,38 @@ struct GetThirdPartyInvite {
false
},
| JoinRule::KnockRestricted(_) => {
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
let valid_join = user_for_join_auth_is_valid
|| sender_membership == MembershipState::Join;
if membership_allows_join || valid_join {
true
} else {
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one, but no valid authorising user \
was given and the sender's current membership does not permit \
a join transition"
);
false
}
},
| JoinRule::Restricted(_) =>
if membership_allows_join || user_for_join_auth_is_valid {
trace!(
%sender,
%membership_allows_join,
%user_for_join_auth_is_valid,
"sender is invited, already joined to, or authorised to join the room, allowing join"
);
true
} else {
if !user_for_join_auth_is_valid
&& sender_membership != MembershipState::Join
{
warn!(
%sender,
membership_event_id = ?target_user_membership_event_id,
membership=?target_user_current_membership,
%user_for_join_auth_is_valid,
?user_for_join_auth,
"sender cannot join as they are not invited nor already joined to the room, nor was a \
valid authorising user given to permit the join"
"Join rule is a restricted one but no valid authorising user \
was given"
);
false
} else {
true
},
| JoinRule::Public => {
trace!(%sender, "join rule is public, allowing join");
true
},
| JoinRule::Public => true,
| _ => {
warn!(
join_rule=?join_rules,
"Join rule is unknown, or the rule's conditions were not met"
membership=?target_user_current_membership,
"Unknown join rule doesn't allow joining, or the rule's conditions were not met"
);
false
},
@@ -963,23 +869,16 @@ struct GetThirdPartyInvite {
}
allow
},
| _ =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
?sender_membership,
"sender cannot produce an invite without being joined to the room",
);
false
} else if matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Ban
) {
| _ => {
if !sender_is_joined
|| target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Ban
{
warn!(
?target_user_membership_event_id,
?target_user_current_membership,
"cannot invite a user who is banned or already joined",
?sender_membership_event_id,
"Can't invite user if sender not joined or the user is currently \
joined or banned",
);
false
} else {
@@ -989,124 +888,56 @@ struct GetThirdPartyInvite {
.is_some();
if !allow {
warn!(
%sender,
has=?sender_power,
required=?power_levels.invite,
"sender does not have enough power to produce invites",
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to invite",
);
}
trace!(
%sender,
?sender_membership_event_id,
?sender_membership,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
required_pl=?power_levels.invite,
"allowing invite"
);
allow
},
}
},
}
},
| MembershipState::Leave => {
let can_unban = if target_user_current_membership == MembershipState::Ban {
sender_creator || sender_power.filter(|&p| p >= &power_levels.ban).is_some()
} else {
true
};
let can_kick = if !matches!(
target_user_current_membership,
MembershipState::Ban | MembershipState::Leave
) {
if sender_creator {
// sender is a creator
true
} else if sender_power.filter(|&p| p >= &power_levels.kick).is_none() {
// sender lacks kick power level
false
} else if let Some(sp) = sender_power {
if let Some(tp) = target_power {
// sender must have more power than target
sp > tp
} else {
// target has default power level
true
}
} else {
// sender has default power level
false
}
} else {
true
};
| MembershipState::Leave =>
if sender == target_user {
// self-leave
// let allow = target_user_current_membership == MembershipState::Join
// || target_user_current_membership == MembershipState::Invite
// || target_user_current_membership == MembershipState::Knock;
let allow = matches!(
target_user_current_membership,
MembershipState::Join | MembershipState::Invite | MembershipState::Knock
);
let allow = target_user_current_membership == MembershipState::Join
|| target_user_current_membership == MembershipState::Invite
|| target_user_current_membership == MembershipState::Knock;
if !allow {
warn!(
%sender,
current_membership_event_id=?target_user_membership_event_id,
current_membership=?target_user_current_membership,
"sender cannot leave as they are not already knocking on, invited to, or joined to the room"
?target_user_membership_event_id,
?target_user_current_membership,
"Can't leave if sender is not already invited, knocked, or joined"
);
}
trace!(sender=%sender, "allowing leave");
allow
} else if !sender_is_joined {
} else if !sender_is_joined
|| target_user_current_membership == MembershipState::Ban
&& (sender_creator
|| sender_power.filter(|&p| p < &power_levels.ban).is_some())
{
warn!(
%sender,
?target_user_membership_event_id,
?sender_membership_event_id,
"sender cannot kick another user as they are not joined to the room",
);
false
} else if !(can_unban && can_kick) {
// If the target is banned, only a room creator or someone with ban power
// level can unban them
warn!(
%sender,
?target_user_membership_event_id,
?power_levels_event_id,
"sender lacks the power level required to unban users",
);
false
} else if !can_kick {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
?power_levels_event_id,
"sender does not have enough power to kick the target",
"Can't kick if sender not joined or user is already banned",
);
false
} else {
trace!(
%sender,
%target_user,
?target_user_membership_event_id,
?target_user_current_membership,
sender_pl=?sender_power,
target_pl=?target_power,
required_pl=?power_levels.kick,
"allowing kick/unban",
);
true
}
},
let allow = sender_creator
|| (sender_power.filter(|&p| p >= &power_levels.kick).is_some()
&& target_power < sender_power);
if !allow {
warn!(
?target_user_membership_event_id,
?power_levels_event_id,
"User does not have enough power to kick",
);
}
allow
},
| MembershipState::Ban =>
if !sender_is_joined {
warn!(
%sender,
?sender_membership_event_id,
"sender cannot ban another user as they are not joined to the room",
);
warn!(?sender_membership_event_id, "Can't ban user if sender is not joined");
false
} else {
let allow = sender_creator
@@ -1114,11 +945,9 @@ struct GetThirdPartyInvite {
&& target_power < sender_power);
if !allow {
warn!(
%sender,
%target_user,
?target_user_membership_event_id,
?power_levels_event_id,
"sender does not have enough power to ban the target",
"User does not have enough power to ban",
);
}
allow
@@ -1144,9 +973,9 @@ struct GetThirdPartyInvite {
} else if sender != target_user {
// 3. If `sender` does not match `state_key`, reject.
warn!(
%sender,
%target_user,
"sender cannot knock on behalf of another user",
?sender,
?target_user,
"Can't make another user knock, sender did not match target"
);
false
} else if matches!(
@@ -1158,25 +987,15 @@ struct GetThirdPartyInvite {
// 5. Otherwise, reject.
warn!(
?target_user_membership_event_id,
?sender_membership,
"Knocking with a membership state of ban, invite or join is invalid",
);
false
} else {
trace!(%sender, "allowing knock");
true
}
},
| _ => {
warn!(
%sender,
?target_membership,
%target_user,
%target_user_current_membership,
"Unknown or invalid membership transition {} -> {}",
target_user_current_membership,
target_membership
);
warn!("Unknown membership transition");
false
},
})
@@ -1206,13 +1025,6 @@ fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int)
if event.state_key().is_some_and(|k| k.starts_with('@'))
&& event.state_key() != Some(event.sender().as_str())
{
warn!(
%user_level,
required=?event_type_power_level,
state_key=?event.state_key(),
sender=%event.sender(),
"state_key starts with @ but does not match sender",
);
return false; // permission required to post in this room
}
@@ -1297,14 +1109,7 @@ fn check_power_levels(
// If the current value is equal to the sender's current power level, reject
if user != power_event.sender() && old_level == Some(&user_level) {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with the same power level as sender's own"
);
warn!("m.room.power_level cannot remove ops == to own");
return Some(false); // cannot remove ops level == to own
}
@@ -1312,26 +1117,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of a user with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?user,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of a user to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1348,26 +1135,8 @@ fn check_power_levels(
// If the new value is higher than the sender's current power level, reject
let old_level_too_big = old_level > Some(&user_level);
let new_level_too_big = new_level > Some(&user_level);
if old_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of an event with a higher power level than sender's own"
);
return Some(false); // cannot add ops greater than own
}
if new_level_too_big {
warn!(
?old_level,
?new_level,
?ev_type,
%user_level,
sender=%power_event.sender(),
"cannot set the power level of an event to a level higher than sender's own"
);
if old_level_too_big || new_level_too_big {
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1382,13 +1151,7 @@ fn check_power_levels(
let old_level_too_big = old_level > user_level;
let new_level_too_big = new_level > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_level,
?new_level,
%user_level,
sender=%power_event.sender(),
"cannot alter the power level of notifications greater than sender's own"
);
warn!("m.room.power_level failed to add ops > than own");
return Some(false); // cannot add ops greater than own
}
}
@@ -1412,14 +1175,7 @@ fn check_power_levels(
let new_level_too_big = new_lvl > user_level;
if old_level_too_big || new_level_too_big {
warn!(
?old_lvl,
?new_lvl,
%user_level,
sender=%power_event.sender(),
action=%lvl_name,
"cannot alter the power level of action greater than sender's own",
);
warn!("cannot add ops > than own");
return Some(false);
}
}
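All of the hunks above enforce one invariant over m.room.power_levels changes; a hedged sketch with a hypothetical helper (not the crate's API). The equal-level restriction applies only to user entries; for event types and notifications only the greater-than checks run.

fn power_change_allowed(
    old: Option<i64>,
    new: Option<i64>,
    sender_level: i64,
    is_own_entry: bool,
) -> bool {
    // Cannot remove ops equal to own, unless the sender edits their own entry.
    if !is_own_entry && old == Some(sender_level) {
        return false;
    }
    // Cannot touch or grant ops greater than own; None orders below any Some.
    old <= Some(sender_level) && new <= Some(sender_level)
}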

View File

@@ -36,7 +36,7 @@
room_version::RoomVersion,
};
use crate::{
debug, debug_error, err,
debug, debug_error,
matrix::{Event, StateKey},
state_res::room_version::StateResolutionVersion,
trace,
@@ -101,40 +101,40 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
debug!(version = ?stateres_version, "State resolution starting");
// Split non-conflicting and conflicting state
let (unconflicted, conflicting) = separate(state_sets.into_iter());
let (clean, conflicting) = separate(state_sets.into_iter());
debug!(count = unconflicted.len(), "non-conflicting events");
trace!(map = ?unconflicted, "non-conflicting events");
debug!(count = clean.len(), "non-conflicting events");
trace!(map = ?clean, "non-conflicting events");
if conflicting.is_empty() {
debug!("no conflicting state found");
return Ok(unconflicted);
return Ok(clean);
}
debug!(count = conflicting.len(), "conflicting events");
trace!(map = ?conflicting, "conflicting events");
let (conflicted_state_subgraph, initial_state) =
if stateres_version == StateResolutionVersion::V2_1 {
let csg = calculate_conflicted_subgraph(&conflicting, event_fetch)
let conflicted_state_subgraph: HashSet<_> = match stateres_version {
| StateResolutionVersion::V2_1 =>
calculate_conflicted_subgraph(&conflicting, event_fetch)
.await
.ok_or_else(|| {
Error::InvalidPdu("Failed to calculate conflicted subgraph".to_owned())
})?;
debug!(count = csg.len(), "conflicted subgraph");
trace!(set = ?csg, "conflicted subgraph");
(csg, HashMap::new())
} else {
(HashSet::new(), unconflicted.clone())
};
})?,
| _ => HashSet::new(),
};
debug!(count = conflicted_state_subgraph.len(), "conflicted subgraph");
trace!(set = ?conflicted_state_subgraph, "conflicted subgraph");
let conflicting_values = conflicting.into_values().flatten().stream();
// `all_conflicted` contains unique items
// synapse says `full_set = {eid for eid in full_conflicted_set if eid in
// event_map}`
// Hydra: Also consider the conflicted state subgraph
let all_conflicted: HashSet<_> = get_auth_chain_diff(auth_chain_sets)
.chain(conflicting.into_values().flatten().stream())
.broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id))
.chain(conflicting_values)
.chain(conflicted_state_subgraph.into_iter().stream())
.broad_filter_map(async |id| event_exists(id.clone()).await.then_some(id))
.collect()
.await;
@@ -169,8 +169,9 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
// Sequentially auth check each control event.
let resolved_control = iterative_auth_check(
&room_version,
&stateres_version,
sorted_control_levels.iter().stream().map(AsRef::as_ref),
initial_state,
clean.clone(),
&event_fetch,
)
.await?;
@@ -200,7 +201,7 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
let power_levels_ty_sk = (StateEventType::RoomPowerLevels, StateKey::new());
let power_event = resolved_control.get(&power_levels_ty_sk);
trace!(event_id = ?power_event, "power event");
debug!(event_id = ?power_event, "power event");
let sorted_left_events =
mainline_sort(&events_to_resolve, power_event.cloned(), &event_fetch).await?;
@@ -209,14 +210,21 @@ pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Ex
let mut resolved_state = iterative_auth_check(
&room_version,
&stateres_version,
sorted_left_events.iter().stream().map(AsRef::as_ref),
resolved_control, // The control events are added to the final resolved state
resolved_control.clone(), // The control events are added to the final resolved state
&event_fetch,
)
.await?;
// Ensure unconflicted state is in the final state
resolved_state.extend(unconflicted);
// Add unconflicted state to the resolved state
// We prioritise the unconflicted state
resolved_state.extend(clean);
if stateres_version == StateResolutionVersion::V2_1 {
resolved_state.extend(resolved_control);
// TODO(hydra): this feels disgusting and wrong but it allows
// the state to resolve properly?
}
debug!("state resolution finished");
trace!( map = ?resolved_state, "final resolved state" );
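Per the comments above, v2 and v2.1 differ only in where the unconflicted state enters the pipeline: v2 seeds both iterative auth passes with it, while v2.1 starts the passes from an empty map and folds the resolved control events back into the final state at the end. A hedged sketch of the seeding, simplified from the surrounding code:

// v2: auth checks start from the clean (unconflicted) state.
// v2.1: auth checks start empty; `clean` and the resolved control events
// are merged into `resolved_state` afterwards instead.
let seed = match stateres_version {
    | StateResolutionVersion::V2_1 => StateMap::new(),
    | _ => clean.clone(),
};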
@@ -311,19 +319,8 @@ async fn calculate_conflicted_subgraph<F, Fut, E>(
path.pop();
continue;
}
trace!(event_id = event_id.as_str(), "fetching event for its auth events");
let evt = fetch_event(event_id.clone()).await;
if evt.is_none() {
err!("could not fetch event {} to calculate conflicted subgraph", event_id);
path.pop();
continue;
}
stack.push(
evt.expect("checked")
.auth_events()
.map(ToOwned::to_owned)
.collect(),
);
let evt = fetch_event(event_id.clone()).await?;
stack.push(evt.auth_events().map(ToOwned::to_owned).collect());
seen.insert(event_id);
}
Some(subgraph)
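The rewrite above changes failure behaviour, not just length: where the removed code logged the missing event and skipped it, `fetch_event(...).await?` now propagates None out of the whole calculation (surfacing as the InvalidPdu error at the call site). The `?` works because the function returns Option; a tiny standalone illustration:

fn head_auth_event(ids: &[&str]) -> Option<String> {
    let first = ids.first()?; // returns None early if the slice is empty
    Some(format!("auth:{first}"))
}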
@@ -419,8 +416,8 @@ async fn reverse_topological_power_sort<E, F, Fut>(
/// `key_fn` is used to obtain the power level and age of an event for
/// breaking ties (together with the event ID).
#[tracing::instrument(level = "debug", skip_all)]
pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher, S>(
graph: &HashMap<Id, HashSet<Id, Hasher>, S>,
pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher>(
graph: &HashMap<Id, HashSet<Id, Hasher>>,
key_fn: &F,
) -> Result<Vec<Id>>
where
@@ -428,7 +425,6 @@ pub async fn lexicographical_topological_sort<Id, F, Fut, Hasher, S>(
Fut: Future<Output = Result<(Int, MilliSecondsSinceUnixEpoch)>> + Send,
Id: Borrow<EventId> + Clone + Eq + Hash + Ord + Send + Sync,
Hasher: BuildHasher + Default + Clone + Send + Sync,
S: BuildHasher + Clone + Send + Sync,
{
#[derive(PartialEq, Eq)]
struct TieBreaker<'a, Id> {
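The TieBreaker struct cut off above carries the `key_fn` outputs used for ordering. Per the state resolution v2 spec, ties break by greater sender power level first, then older origin_server_ts, then lexicographically smaller event ID; a hedged, self-contained sketch of that comparison (not the crate's exact type):

use std::cmp::Ordering;

struct Tie<'a> {
    power: i64,
    origin_server_ts: u64,
    event_id: &'a str,
}

impl Tie<'_> {
    // Smaller keys sort first: higher power, then older timestamp,
    // then lexicographically smaller event ID.
    fn cmp_key(&self, other: &Self) -> Ordering {
        other
            .power
            .cmp(&self.power)
            .then(self.origin_server_ts.cmp(&other.origin_server_ts))
            .then(self.event_id.cmp(other.event_id))
    }
}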
@@ -596,6 +592,7 @@ async fn get_power_level_for_sender<E, F, Fut>(
#[tracing::instrument(level = "trace", skip_all)]
async fn iterative_auth_check<'a, E, F, Fut, S>(
room_version: &RoomVersion,
stateres_version: &StateResolutionVersion,
events_to_check: S,
unconflicted_state: StateMap<OwnedEventId>,
fetch_event: &F,
@@ -620,10 +617,6 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
.boxed()
.await?;
trace!(list = ?events_to_check, "events to check");
if events_to_check.is_empty() {
debug!("no events to check, returning unconflicted state");
return Ok(unconflicted_state);
}
let auth_event_ids: HashSet<OwnedEventId> = events_to_check
.iter()
@@ -644,11 +637,10 @@ async fn iterative_auth_check<'a, E, F, Fut, S>(
trace!(map = ?auth_events.keys().collect::<Vec<_>>(), "fetched auth events");
let auth_events = &auth_events;
// NOTE: in state resolution v2.1, auth checks should start with an empty state
// map. It is the caller's job to do this. Previously, this function would
// force an empty state map in this case, and this resulted in power events
// going missing from the resolved state as they'd be discarded here.
let mut resolved_state = unconflicted_state;
let mut resolved_state = match stateres_version {
| StateResolutionVersion::V2_1 => StateMap::new(),
| _ => unconflicted_state,
};
for event in events_to_check {
trace!(event_id = event.event_id().as_str(), "checking event");
let state_key = event
@@ -1036,6 +1028,7 @@ async fn test_event_sort() {
let resolved_power = super::iterative_auth_check(
&RoomVersion::V6,
&StateResolutionVersion::V2,
sorted_power_events.iter().map(AsRef::as_ref).stream(),
HashMap::new(), // unconflicted events
&fetcher,
@@ -1074,8 +1067,7 @@ async fn test_event_sort() {
);
}
// NOTE(2025-09-17): Disabled due to unknown "create event must exist" bug
// #[tokio::test]
#[tokio::test]
async fn test_sort() {
for _ in 0..20 {
// since we shuffle the event IDs before we sort them, introducing randomness
@@ -1084,8 +1076,7 @@ async fn test_sort() {
}
}
// NOTE(2025-09-17): Disabled due to unknown "create event must exist" bug
//#[tokio::test]
#[tokio::test]
async fn ban_vs_power_level() {
let _ = tracing::subscriber::set_default(
tracing_subscriber::fmt().with_test_writer().finish(),

View File

@@ -28,7 +28,7 @@ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// use conduwuit_core::utils::debug::slice_truncated;
///
/// #[tracing::instrument(fields(foos = slice_truncated(foos, 42)))]
/// fn bar(foos: &[&str]) {};
/// fn bar(foos: &[&str]);
/// ```
pub fn slice_truncated<T: fmt::Debug>(
slice: &[T],

View File

@@ -276,22 +276,3 @@ async fn set_intersection_sorted_stream2() {
.await;
assert!(r.eq(&["ccc", "ggg", "iii"]));
}
#[test]
fn is_within_bounds() {
use std::time::{Duration, SystemTime};
use utils::time::{TimeDirection, is_within_bounds};
let now = SystemTime::now();
let yesterday = now - Duration::from_secs(86400);
assert!(is_within_bounds(yesterday, now, TimeDirection::Before));
assert!(!is_within_bounds(yesterday, now, TimeDirection::After));
let tomorrow = now + Duration::from_secs(86400);
assert!(is_within_bounds(tomorrow, now, TimeDirection::After));
assert!(!is_within_bounds(tomorrow, now, TimeDirection::Before));
assert!(is_within_bounds(now, now, TimeDirection::Before));
assert!(is_within_bounds(now, now, TimeDirection::After));
}

View File

@@ -126,24 +126,3 @@ pub enum Unit {
Micros(u128),
Nanos(u128),
}
#[derive(Eq, PartialEq, Clone, Copy, Debug)]
pub enum TimeDirection {
Before,
After,
}
/// Checks if `item_time` is before or after `time_boundary`.
/// If both times are the same, it will return true for both directions, as the
/// matching is inclusive.
#[must_use]
pub fn is_within_bounds(
item_time: SystemTime,
time_boundary: SystemTime,
direction: TimeDirection,
) -> bool {
match direction {
| TimeDirection::Before => item_time <= time_boundary,
| TimeDirection::After => item_time >= time_boundary,
}
}

View File

@@ -417,7 +417,7 @@ fn deserialize_ignored_any<V: Visitor<'de>>(self, _visitor: V) -> Result<V::Valu
fn deserialize_any<V: Visitor<'de>>(self, visitor: V) -> Result<V::Value> {
debug_assert_eq!(
conduwuit::debug::type_name::<V>(),
"serde_json::value::de::<impl serde_core::de::Deserialize for \
"serde_json::value::de::<impl serde::de::Deserialize for \
serde_json::value::Value>::deserialize::ValueVisitor",
"deserialize_any: type not expected"
);

View File

@@ -1,9 +1,7 @@
use std::{cmp, convert::TryFrom};
use conduwuit::{Config, Result, utils, warn};
use rocksdb::{
Cache, DBCompressionType, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel,
};
use conduwuit::{Config, Result, utils};
use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
@@ -60,20 +58,6 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
opts.set_max_total_wal_size(1024 * 1024 * 512);
opts.set_writable_file_max_buffer_size(1024 * 1024 * 2);
// WAL compression
let wal_compression = match config.rocksdb_wal_compression.as_ref() {
| "zstd" => DBCompressionType::Zstd,
| "none" => DBCompressionType::None,
| value => {
warn!(
"Invalid rocksdb_wal_compression value '{value}'. Supported values are 'none' \
or 'zstd'. Defaulting to 'none'."
);
DBCompressionType::None
},
};
opts.set_wal_compression_type(wal_compression);
// Misc
opts.set_disable_auto_compactions(!config.rocksdb_compaction);
opts.create_missing_column_families(true);
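For reference, the removed block only wrapped a plain rust-rocksdb setter; RocksDB accepts only None and Zstd for WAL compression. A hedged sketch of setting it directly:

use rocksdb::{DBCompressionType, Options};

let mut opts = Options::default();
// Only None and Zstd are valid WAL compression types in RocksDB.
opts.set_wal_compression_type(DBCompressionType::Zstd);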

View File

@@ -10,7 +10,7 @@
use futures::{Stream, StreamExt, TryStreamExt};
use rocksdb::{DBPinnableSlice, ReadOptions};
use super::get::handle_from;
use super::get::{cached_handle_from, handle_from};
use crate::Handle;
pub trait Get<'a, K, S>
@@ -58,6 +58,20 @@ pub(crate) fn get_batch<'a, S, K>(
.try_flatten()
}
#[implement(super::Map)]
#[tracing::instrument(name = "batch_cached", level = "trace", skip_all)]
pub(crate) fn get_batch_cached<'a, I, K>(
&self,
keys: I,
) -> impl Iterator<Item = Result<Option<Handle<'_>>>> + Send + use<'_, I, K>
where
I: Iterator<Item = &'a K> + ExactSizeIterator + Send,
K: AsRef<[u8]> + Send + ?Sized + Sync + 'a,
{
self.get_batch_blocking_opts(keys, &self.cache_read_options)
.map(cached_handle_from)
}
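A hypothetical call site for the new method (the names below are assumed, not shown in this diff): it runs the same blocking batch path but with the cache-only read options, yielding one Result<Option<Handle>> per key in order.

// `map` is a database Map; keys are anything byte-like.
let keys = ["alice", "bob"];
for entry in map.get_batch_cached(keys.iter()) {
    match entry? {
        | Some(handle) => println!("hit: {handle:?}"),
        | None => println!("miss"),
    }
}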
#[implement(super::Map)]
#[tracing::instrument(name = "batch_blocking", level = "trace", skip_all)]
pub(crate) fn get_batch_blocking<'a, I, K>(

View File

@@ -434,8 +434,4 @@ pub(super) fn open_list(db: &Arc<Engine>, maps: &[Descriptor]) -> Result<Maps> {
name: "userroomid_notificationcount",
..descriptor::RANDOM
},
Descriptor {
name: "userroomid_invitesender",
..descriptor::RANDOM_SMALL
},
];

View File

@@ -184,7 +184,7 @@ fn spawn_one(
let handle = thread::Builder::new()
.name(WORKER_NAME.into())
.stack_size(WORKER_STACK_SIZE)
.spawn(move || self.worker(id, &recv))?;
.spawn(move || self.worker(id, recv))?;
workers.push(handle);
@@ -260,9 +260,9 @@ async fn execute(&self, queue: &Sender<Cmd>, cmd: Cmd) -> Result {
tid = ?thread::current().id(),
),
)]
fn worker(self: Arc<Self>, id: usize, recv: &Receiver<Cmd>) {
fn worker(self: Arc<Self>, id: usize, recv: Receiver<Cmd>) {
self.worker_init(id);
self.worker_loop(recv);
self.worker_loop(&recv);
}
#[implement(Pool)]
@@ -309,7 +309,7 @@ fn worker_loop(self: &Arc<Self>, recv: &Receiver<Cmd>) {
self.busy.fetch_add(1, Ordering::Relaxed);
while let Ok(cmd) = self.worker_wait(recv) {
Pool::worker_handle(cmd);
self.worker_handle(cmd);
}
}
@@ -331,11 +331,11 @@ fn worker_wait(self: &Arc<Self>, recv: &Receiver<Cmd>) -> Result<Cmd, RecvError>
}
#[implement(Pool)]
fn worker_handle(cmd: Cmd) {
fn worker_handle(self: &Arc<Self>, cmd: Cmd) {
match cmd {
| Cmd::Get(cmd) if cmd.key.len() == 1 => Pool::handle_get(cmd),
| Cmd::Get(cmd) => Pool::handle_batch(cmd),
| Cmd::Iter(cmd) => Pool::handle_iter(cmd),
| Cmd::Get(cmd) if cmd.key.len() == 1 => self.handle_get(cmd),
| Cmd::Get(cmd) => self.handle_batch(cmd),
| Cmd::Iter(cmd) => self.handle_iter(cmd),
}
}
@@ -346,7 +346,7 @@ fn worker_handle(cmd: Cmd) {
skip_all,
fields(%cmd.map),
)]
fn handle_iter(mut cmd: Seek) {
fn handle_iter(&self, mut cmd: Seek) {
let chan = cmd.res.take().expect("missing result channel");
if chan.is_canceled() {
@@ -375,7 +375,7 @@ fn handle_iter(mut cmd: Seek) {
keys = %cmd.key.len(),
),
)]
fn handle_batch(mut cmd: Get) {
fn handle_batch(self: &Arc<Self>, mut cmd: Get) {
debug_assert!(cmd.key.len() > 1, "should have more than one key");
debug_assert!(!cmd.key.iter().any(SmallVec::is_empty), "querying for empty key");
@@ -401,7 +401,7 @@ fn handle_batch(mut cmd: Get) {
skip_all,
fields(%cmd.map),
)]
fn handle_get(mut cmd: Get) {
fn handle_get(&self, mut cmd: Get) {
debug_assert!(!cmd.key[0].is_empty(), "querying for empty key");
// Obtain the result channel.
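The pool refactor above changes ownership, not behaviour: each spawned thread now takes its Receiver by value and merely lends it to the loop, and the handlers become methods on the pool. A hedged standalone sketch of the same shape using std channels:

use std::sync::mpsc::{channel, Receiver};
use std::thread;

fn worker(id: usize, recv: Receiver<String>) {
    worker_loop(id, &recv); // the thread owns the channel end, the loop borrows it
}

fn worker_loop(id: usize, recv: &Receiver<String>) {
    while let Ok(cmd) = recv.recv() {
        println!("worker {id} handling {cmd}");
    }
}

fn main() {
    let (send, recv) = channel();
    let handle = thread::spawn(move || worker(0, recv));
    send.send("ping".to_owned()).unwrap();
    drop(send); // closing the sender ends the worker loop
    handle.join().unwrap();
}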

View File

@@ -25,13 +25,13 @@ pub(super) fn refutable(mut item: ItemFn, _args: &[Meta]) -> Result<TokenStream>
};
let name = format!("_args_{i}");
**pat = Pat::Ident(PatIdent {
*pat = Box::new(Pat::Ident(PatIdent {
ident: Ident::new(&name, Span::call_site().into()),
attrs: Vec::new(),
by_ref: None,
mutability: None,
subpat: None,
});
}));
let field = fields.iter();
let refute = quote! {

View File

@@ -15,13 +15,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream {
#[ctor]
fn _set_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS);
conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS);
}
// static strings have to be yanked on module unload
#[dtor]
fn _unset_rustc_flags() {
conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name);
conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name);
}
};

View File

@@ -22,13 +22,11 @@ crate-type = [
]
[package.metadata.deb]
name = "continuwuity"
maintainer = "continuwuity developers <contact@continuwuity.org>"
copyright = "2024, continuwuity developers"
name = "conduwuit"
maintainer = "strawberry <strawberry@puppygock.gay>"
copyright = "2024, strawberry <strawberry@puppygock.gay>"
license-file = ["../../LICENSE", "3"]
depends = "$auto, ca-certificates"
breaks = ["conduwuit (<<0.5.0)"]
replaces = ["conduwuit (<<0.5.0)"]
extended-description = """\
a cool hard fork of Conduit, a Matrix homeserver written in Rust"""
section = "net"
@@ -62,8 +60,7 @@ standard = [
"media_thumbnail",
"systemd",
"url_preview",
"zstd_compression",
"sentry_telemetry"
"zstd_compression"
]
full = [
"standard",
@@ -130,6 +127,7 @@ perf_measurements = [
"dep:tracing-opentelemetry",
"dep:opentelemetry_sdk",
"dep:opentelemetry-otlp",
"dep:opentelemetry-jaeger-propagator",
"conduwuit-core/perf_measurements",
"conduwuit-core/sentry_telemetry",
]
@@ -156,7 +154,6 @@ sentry_telemetry = [
]
systemd = [
"conduwuit-router/systemd",
"conduwuit-service/systemd"
]
journald = [ # This is a stub on non-unix platforms
"dep:tracing-journald",
@@ -201,7 +198,6 @@ conduwuit-core.workspace = true
conduwuit-database.workspace = true
conduwuit-router.workspace = true
conduwuit-service.workspace = true
conduwuit-build-metadata.workspace = true
clap.workspace = true
console-subscriber.optional = true
@@ -213,6 +209,8 @@ opentelemetry.optional = true
opentelemetry.workspace = true
opentelemetry-otlp.optional = true
opentelemetry-otlp.workspace = true
opentelemetry-jaeger-propagator.optional = true
opentelemetry-jaeger-propagator.workspace = true
opentelemetry_sdk.optional = true
opentelemetry_sdk.workspace = true
sentry-tower.optional = true

View File

@@ -94,7 +94,7 @@ pub(crate) fn init(
let otlp_layer = config.allow_otlp.then(|| {
opentelemetry::global::set_text_map_propagator(
opentelemetry_sdk::propagation::TraceContextPropagator::new(),
opentelemetry_jaeger_propagator::Propagator::new(),
);
let exporter = opentelemetry_otlp::SpanExporter::builder()

View File

@@ -1,12 +1,10 @@
#![cfg(feature = "sentry_telemetry")]
use std::{
borrow::Cow,
str::FromStr,
sync::{Arc, OnceLock},
};
use conduwuit_build_metadata as build;
use conduwuit_core::{config::Config, debug, trace};
use sentry::{
Breadcrumb, ClientOptions, Level,
@@ -46,7 +44,7 @@ fn options(config: &Config) -> ClientOptions {
server_name,
traces_sample_rate: config.sentry_traces_sample_rate,
debug: cfg!(debug_assertions),
release: release_name(),
release: sentry::release_name!(),
user_agent: conduwuit_core::version::user_agent().into(),
attach_stacktrace: config.sentry_attach_stacktrace,
before_send: Some(Arc::new(before_send)),
@@ -93,21 +91,3 @@ fn before_breadcrumb(crumb: Breadcrumb) -> Option<Breadcrumb> {
trace!("Sentry breadcrumb: {crumb:?}");
Some(crumb)
}
fn release_name() -> Option<Cow<'static, str>> {
static RELEASE: OnceLock<Option<String>> = OnceLock::new();
RELEASE
.get_or_init(|| {
let pkg_name = env!("CARGO_PKG_NAME");
let pkg_version = env!("CARGO_PKG_VERSION");
if let Some(commit_short) = build::GIT_COMMIT_HASH_SHORT {
Some(format!("{pkg_name}@{pkg_version}+{commit_short}"))
} else {
Some(format!("{pkg_name}@{pkg_version}"))
}
})
.as_ref()
.map(|s| Cow::Borrowed(s.as_str()))
}
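The hand-rolled helper above is traded for the crate macro: sentry::release_name!() expands at compile time to an Option<Cow<'static, str>> of the form "pkg@version" built from Cargo metadata, so the "+commit" suffix the removed code appended is no longer included. A hedged sketch:

// e.g. Some(Cow::Borrowed("conduwuit@0.5.0")), derived from
// CARGO_PKG_NAME and CARGO_PKG_VERSION at compile time.
let release: Option<std::borrow::Cow<'static, str>> = sentry::release_name!();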

View File

@@ -40,6 +40,7 @@ io_uring = [
"conduwuit-admin/io_uring",
"conduwuit-api/io_uring",
"conduwuit-service/io_uring",
"conduwuit-api/io_uring",
]
jemalloc = [
"conduwuit-admin/jemalloc",

View File

@@ -65,7 +65,7 @@ pub(crate) async fn start(server: Arc<Server>) -> Result<Arc<Services>> {
let services = Services::build(server).await?.start().await?;
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Ready])
sd_notify::notify(true, &[sd_notify::NotifyState::Ready])
.expect("failed to notify systemd of ready state");
debug!("Started");
@@ -78,7 +78,7 @@ pub(crate) async fn stop(services: Arc<Services>) -> Result<()> {
debug!("Shutting down...");
#[cfg(all(feature = "systemd", target_os = "linux"))]
sd_notify::notify(false, &[sd_notify::NotifyState::Stopping])
sd_notify::notify(true, &[sd_notify::NotifyState::Stopping])
.expect("failed to notify systemd of stopping state");
// Wait for all completions before dropping or we'll lose them to the module

View File

@@ -67,9 +67,6 @@ release_max_log_level = [
"tracing/max_level_trace",
"tracing/release_max_level_info",
]
systemd = [
"dep:sd-notify",
]
url_preview = [
"dep:image",
"dep:webpage",
@@ -108,7 +105,7 @@ rustyline-async.workspace = true
rustyline-async.optional = true
serde_json.workspace = true
serde.workspace = true
serde-saphyr.workspace = true
serde_yml.workspace = true
sha2.workspace = true
termimad.workspace = true
termimad.optional = true
@@ -121,11 +118,6 @@ blurhash.workspace = true
blurhash.optional = true
recaptcha-verify = { version = "0.1.5", default-features = false }
ctor.workspace = true
dashmap = "6.1.0"
[target.'cfg(all(unix, target_os = "linux"))'.dependencies]
sd-notify.workspace = true
sd-notify.optional = true
[lints]
workspace = true

View File

@@ -271,7 +271,7 @@ pub async fn get_db_registration(&self, id: &str) -> Result<Registration> {
.id_appserviceregistrations
.get(id)
.await
.and_then(|ref bytes| serde_saphyr::from_slice(bytes).map_err(Into::into))
.and_then(|ref bytes| serde_yml::from_slice(bytes).map_err(Into::into))
.map_err(|e| err!(Database("Invalid appservice {id:?} registration: {e:?}")))
}
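serde_yml::from_slice is a drop-in replacement for the call it swaps out here; a hedged sketch with a hypothetical minimal registration shape (the real Registration type comes from ruma):

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct MiniRegistration {
    id: String,
    url: Option<String>,
}

fn parse(bytes: &[u8]) -> Result<MiniRegistration, serde_yml::Error> {
    serde_yml::from_slice(bytes)
}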

Some files were not shown because too many files have changed in this diff.