Mirror of https://forgejo.ellis.link/continuwuation/continuwuity/
Synced 2026-04-12 21:15:40 +00:00

Compare commits: nex/feat/b...jade/flake (222 commits)
@@ -26,3 +26,7 @@ max_line_length = 98
 [*.yml]
 indent_size = 2
 indent_style = space
+
+[*.json]
+indent_size = 4
+indent_style = space
.envrc (4 lines changed)
@@ -2,6 +2,8 @@
 
 dotenv_if_exists
 
-# use flake ".#${DIRENV_DEVSHELL:-default}"
+if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
+  use flake ".#${DIRENV_DEVSHELL:-default}"
+fi
 
 PATH_add bin
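The new guard keys off the ID field of /etc/os-release, so direnv only evaluates the flake on NixOS hosts. A quick way to see what the test matches, reusing the exact condition from the .envrc above (safe to run anywhere):

    # Prints "nixos" only when /etc/os-release declares ID=nixos,
    # i.e. exactly the condition the .envrc guard checks.
    if [ -f /etc/os-release ] && grep -q '^ID=nixos' /etc/os-release; then
      echo nixos
    fi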
.forgejo/actions/create-docker-manifest/action.yml (new file, 108 lines)
@@ -0,0 +1,108 @@
name: create-manifest
description: |
  Create and push a multi-platform Docker manifest from individual platform digests.
  Handles downloading digests, creating manifest lists, and pushing to registry.

inputs:
  digest_pattern:
    description: Glob pattern to match digest artifacts (e.g. "digests-linux-{amd64,arm64}")
    required: true
  tag_suffix:
    description: Suffix to add to all Docker tags (e.g. "-maxperf")
    required: false
    default: ""
  images:
    description: Container registry images (newline-separated)
    required: true
  registry_user:
    description: Registry username for authentication
    required: false
  registry_password:
    description: Registry password for authentication
    required: false

outputs:
  version:
    description: The version tag created for the manifest
    value: ${{ steps.meta.outputs.version }}
  tags:
    description: All tags created for the manifest
    value: ${{ steps.meta.outputs.tags }}

runs:
  using: composite
  steps:
    - name: Download digests
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: forgejo/download-artifact@v4
      with:
        path: /tmp/digests
        pattern: ${{ inputs.digest_pattern }}
        merge-multiple: true

    - name: Login to builtin registry
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: docker/login-action@v3
      with:
        registry: ${{ env.BUILTIN_REGISTRY }}
        username: ${{ inputs.registry_user }}
        password: ${{ inputs.registry_password }}

    - name: Set up Docker Buildx
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: docker/setup-buildx-action@v3
      with:
        # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
        driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
        endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}

    - name: Extract metadata (tags) for Docker
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      id: meta
      uses: docker/metadata-action@v5
      with:
        flavor: |
          suffix=${{ inputs.tag_suffix }},onlatest=true
        tags: |
          type=semver,pattern={{version}},prefix=v
          type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
          type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
          type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }},
          type=ref,event=pr
          type=sha,format=short
          type=raw,value=latest${{ inputs.tag_suffix }},enable=${{ startsWith(github.ref, 'refs/tags/v') }},priority=1100
        images: ${{ inputs.images }}
        # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
      env:
        DOCKER_METADATA_ANNOTATIONS_LEVELS: index

    - name: Create manifest list and push
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      working-directory: /tmp/digests
      shell: bash
      env:
        IMAGES: ${{ inputs.images }}
      run: |
        set -o xtrace
        IFS=$'\n'
        IMAGES_LIST=($IMAGES)
        ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
        TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
        for REPO in "${IMAGES_LIST[@]}"; do
          docker buildx imagetools create \
            $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
            $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
            $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
        done

    - name: Inspect image
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      shell: bash
      env:
        IMAGES: ${{ inputs.images }}
      run: |
        set -o xtrace
        IMAGES_LIST=($IMAGES)
        for REPO in "${IMAGES_LIST[@]}"; do
          docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
        done
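The run script above leans on IFS=$'\n' so that each newline-separated tag and annotation expands into its own --tag/--annotation argument pair after command substitution. A minimal sketch of the same expansion outside CI; the image name, tags, and digest here are hypothetical placeholders, and the docker command is only echoed:

    #!/usr/bin/env bash
    # With IFS set to newline, $(...) output splits into one word per line,
    # so each "--tag" / value pair becomes two clean arguments.
    IFS=$'\n'
    TAGS=$'registry.example.com/app:v1.2.3\nregistry.example.com/app:latest'  # hypothetical tags
    TAGS_LIST=($TAGS)
    echo docker buildx imagetools create \
      $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
      "registry.example.com/app@sha256:<digest>"  # hypothetical digest reference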
@@ -1,27 +0,0 @@ (file deleted)
name: prefligit
description: |
  Runs prefligit, pre-commit reimplemented in Rust.
inputs:
  extra_args:
    description: options to pass to pre-commit run
    required: false
    default: '--all-files'

runs:
  using: composite
  steps:
    - name: Install uv
      uses: https://github.com/astral-sh/setup-uv@v6
      with:
        enable-cache: true
        ignore-nothing-to-cache: true
    - name: Install Prefligit
      shell: bash
      run: |
        curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh
    - uses: actions/cache@v3
      with:
        path: ~/.cache/prefligit
        key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }}
    - run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }}
      shell: bash
.forgejo/actions/prepare-docker-build/action.yml (new file, 169 lines)
@@ -0,0 +1,169 @@
name: prepare-docker-build
description: |
  Prepare the Docker build environment for Continuwuity builds.
  Sets up Rust toolchain, Docker Buildx, caching, and extracts metadata for Docker builds.

inputs:
  platform:
    description: Target platform (e.g. linux/amd64, linux/arm64)
    required: true
  slug:
    description: Platform slug for artifact naming (e.g. linux-amd64, linux-arm64)
    required: true
  target_cpu:
    description: Target CPU architecture (e.g. haswell, empty for base)
    required: false
    default: ""
  profile:
    description: Cargo build profile (release or release-max-perf)
    required: true
  images:
    description: Container registry images (newline-separated)
    required: true
  registry_user:
    description: Registry username for authentication
    required: false
  registry_password:
    description: Registry password for authentication
    required: false

outputs:
  cpu_suffix:
    description: CPU suffix for artifact naming
    value: ${{ steps.cpu-suffix.outputs.suffix }}
  metadata_labels:
    description: Docker labels for the image
    value: ${{ steps.meta.outputs.labels }}
  metadata_annotations:
    description: Docker annotations for the image
    value: ${{ steps.meta.outputs.annotations }}

runs:
  using: composite
  steps:
    - name: Set CPU suffix variable
      id: cpu-suffix
      shell: bash
      run: |
        if [[ -n "${{ inputs.target_cpu }}" ]]; then
          echo "suffix=-${{ inputs.target_cpu }}" >> $GITHUB_OUTPUT
          echo "CPU_SUFFIX=-${{ inputs.target_cpu }}" >> $GITHUB_ENV
        else
          echo "suffix=" >> $GITHUB_OUTPUT
          echo "CPU_SUFFIX=" >> $GITHUB_ENV
        fi

    - name: Echo matrix configuration
      shell: bash
      run: |
        echo "Platform: ${{ inputs.platform }}"
        echo "Slug: ${{ inputs.slug }}"
        echo "Target CPU: ${{ inputs.target_cpu }}"
        echo "Profile: ${{ inputs.profile }}"

    - name: Install rust
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      id: rust-toolchain
      uses: ./.forgejo/actions/rust-toolchain

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3
      with:
        # Use persistent BuildKit if BUILDKIT_ENDPOINT is set (e.g. tcp://buildkit:8125)
        driver: ${{ env.BUILDKIT_ENDPOINT != '' && 'remote' || 'docker-container' }}
        endpoint: ${{ env.BUILDKIT_ENDPOINT || '' }}

    - name: Set up QEMU
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      uses: docker/setup-qemu-action@v3

    - name: Login to builtin registry
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: docker/login-action@v3
      with:
        registry: ${{ env.BUILTIN_REGISTRY }}
        username: ${{ inputs.registry_user }}
        password: ${{ inputs.registry_password }}

    - name: Extract metadata (labels, annotations) for Docker
      id: meta
      uses: docker/metadata-action@v5
      with:
        images: ${{ inputs.images }}
        # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
      env:
        DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index

    - name: Get short git commit SHA
      id: sha
      shell: bash
      run: |
        calculatedSha=$(git rev-parse --short ${{ github.sha }})
        echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
        echo "Short SHA: $calculatedSha"

    - name: Get Git commit timestamps
      shell: bash
      run: |
        timestamp=$(git log -1 --pretty=%ct)
        echo "TIMESTAMP=$timestamp" >> $GITHUB_ENV
        echo "Commit timestamp: $timestamp"

    - uses: ./.forgejo/actions/timelord
      id: timelord

    - name: Cache Rust registry
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      uses: actions/cache@v3
      with:
        path: |
          .cargo/git
          .cargo/git/checkouts
          .cargo/registry
          .cargo/registry/src
        key: continuwuity-rust-registry-image-${{hashFiles('**/Cargo.lock') }}

    - name: Cache cargo target
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      id: cache-cargo-target
      uses: actions/cache@v3
      with:
        path: |
          cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}
        key: continuwuity-cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}

    - name: Cache apt cache
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      id: cache-apt
      uses: actions/cache@v3
      with:
        path: |
          var-cache-apt-${{ inputs.slug }}
        key: continuwuity-var-cache-apt-${{ inputs.slug }}

    - name: Cache apt lib
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      id: cache-apt-lib
      uses: actions/cache@v3
      with:
        path: |
          var-lib-apt-${{ inputs.slug }}
        key: continuwuity-var-lib-apt-${{ inputs.slug }}

    - name: inject cache into docker
      if: ${{ env.BUILDKIT_ENDPOINT == '' }}
      uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.3.0
      with:
        cache-map: |
          {
            ".cargo/registry": "/usr/local/cargo/registry",
            ".cargo/git/db": "/usr/local/cargo/git/db",
            "cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}": {
              "target": "/app/target",
              "id": "cargo-target${{ env.CPU_SUFFIX }}-${{ inputs.slug }}-${{ inputs.profile }}"
            },
            "var-cache-apt-${{ inputs.slug }}": "/var/cache/apt",
            "var-lib-apt-${{ inputs.slug }}": "/var/lib/apt",
            "${{ steps.timelord.outputs.database-path }}":"/timelord"
          }
        skip-extraction: ${{ steps.cache.outputs.cache-hit }}
@@ -40,7 +40,7 @@ runs:
           !~/.rustup/tmp
           !~/.rustup/downloads
         # Requires repo to be cloned if toolchain is not specified
-        key: ${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
+        key: continuwuity-${{ runner.os }}-rustup-${{ inputs.toolchain || hashFiles('**/rust-toolchain.toml') }}
     - name: Install Rust toolchain
       if: steps.rustup-version.outputs.version == ''
       shell: bash
@@ -2,20 +2,14 @@ name: sccache
 description: |
   Install sccache for caching builds in GitHub Actions.
 
-inputs:
-  token:
-    description: 'A Github PAT'
-    required: false
-
 runs:
   using: composite
   steps:
     - name: Install sccache
-      uses: https://github.com/mozilla-actions/sccache-action@v0.0.9
-      with:
-        token: ${{ inputs.token }}
+      uses: https://git.tomfos.tr/tom/sccache-action@v1
     - name: Configure sccache
-      uses: https://github.com/actions/github-script@v7
+      uses: https://github.com/actions/github-script@v8
       with:
         script: |
           core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
.forgejo/actions/setup-llvm-with-apt/action.yml (new file, 167 lines)
@@ -0,0 +1,167 @@
name: setup-llvm-with-apt
description: |
  Set up LLVM toolchain with APT package management and smart caching.
  Supports cross-compilation architectures and additional package installation.

  Creates symlinks in /usr/bin: clang, clang++, lld, llvm-ar, llvm-ranlib

inputs:
  dpkg-arch:
    description: 'Debian architecture for cross-compilation (e.g. arm64)'
    required: false
    default: ''
  extra-packages:
    description: 'Additional APT packages to install (space-separated)'
    required: false
    default: ''
  llvm-version:
    description: 'LLVM version to install'
    required: false
    default: '20'

outputs:
  llvm-version:
    description: 'Installed LLVM version'
    value: ${{ steps.configure.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: https://git.tomfos.tr/actions/detect-versions@v1

    - name: Configure cross-compilation architecture
      if: inputs.dpkg-arch != ''
      shell: bash
      run: |
        echo "🏗️ Adding ${{ inputs.dpkg-arch }} architecture"
        sudo dpkg --add-architecture ${{ inputs.dpkg-arch }}

        # Restrict default sources to amd64
        sudo sed -i 's/^deb http/deb [arch=amd64] http/g' /etc/apt/sources.list
        sudo sed -i 's/^deb https/deb [arch=amd64] https/g' /etc/apt/sources.list

        # Add ports sources for foreign architecture
        sudo tee /etc/apt/sources.list.d/${{ inputs.dpkg-arch }}.list > /dev/null <<EOF
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-updates main restricted universe multiverse
        deb [arch=${{ inputs.dpkg-arch }}] http://ports.ubuntu.com/ubuntu-ports/ jammy-security main restricted universe multiverse
        EOF

        echo "✅ Architecture ${{ inputs.dpkg-arch }} configured"

    - name: Start LLVM cache group
      shell: bash
      run: echo "::group::📦 Restoring LLVM cache"

    - name: Check for LLVM cache
      id: cache
      uses: actions/cache@v4
      with:
        path: |
          /usr/bin/clang-*
          /usr/bin/clang++-*
          /usr/bin/lld-*
          /usr/bin/llvm-*
          /usr/lib/llvm-*/
          /usr/lib/x86_64-linux-gnu/libLLVM*.so*
          /usr/lib/x86_64-linux-gnu/libclang*.so*
          /etc/apt/sources.list.d/archive_uri-*
          /etc/apt/trusted.gpg.d/apt.llvm.org.asc
        key: continuwuity-llvm-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-v${{ inputs.llvm-version }}-${{ hashFiles('**/Cargo.lock', 'rust-toolchain.toml') }}

    - name: End LLVM cache group
      shell: bash
      run: echo "::endgroup::"

    - name: Check and install LLVM if needed
      id: llvm-setup
      shell: bash
      run: |
        echo "🔍 Checking for LLVM ${{ inputs.llvm-version }}..."

        # Check both binaries and libraries exist
        if [ -f "/usr/bin/clang-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/clang++-${{ inputs.llvm-version }}" ] && \
           [ -f "/usr/bin/lld-${{ inputs.llvm-version }}" ] && \
           ([ -f "/usr/lib/x86_64-linux-gnu/libLLVM.so.${{ inputs.llvm-version }}.1" ] || \
            [ -f "/usr/lib/x86_64-linux-gnu/libLLVM-${{ inputs.llvm-version }}.so.1" ] || \
            [ -f "/usr/lib/llvm-${{ inputs.llvm-version }}/lib/libLLVM.so" ]); then
          echo "✅ LLVM ${{ inputs.llvm-version }} found and verified"
          echo "needs-install=false" >> $GITHUB_OUTPUT
        else
          echo "📦 LLVM ${{ inputs.llvm-version }} not found or incomplete - installing..."

          echo "::group::🔧 Installing LLVM ${{ inputs.llvm-version }}"
          wget -O - https://apt.llvm.org/llvm.sh | bash -s -- ${{ inputs.llvm-version }}
          echo "::endgroup::"

          if [ ! -f "/usr/bin/clang-${{ inputs.llvm-version }}" ]; then
            echo "❌ Failed to install LLVM ${{ inputs.llvm-version }}"
            exit 1
          fi

          echo "✅ Installed LLVM ${{ inputs.llvm-version }}"
          echo "needs-install=true" >> $GITHUB_OUTPUT
        fi

    - name: Prepare for additional packages
      if: inputs.extra-packages != ''
      shell: bash
      run: |
        # Update APT if LLVM was cached (installer script already does apt-get update)
        if [[ "${{ steps.llvm-setup.outputs.needs-install }}" != "true" ]]; then
          echo "::group::📦 Running apt-get update (LLVM cached, extra packages needed)"
          sudo apt-get update
          echo "::endgroup::"
        fi
        echo "::group::📦 Installing additional packages"

    - name: Install additional packages
      if: inputs.extra-packages != ''
      uses: https://github.com/awalsh128/cache-apt-pkgs-action@latest
      with:
        packages: ${{ inputs.extra-packages }}
        version: 1.0

    - name: End package installation group
      if: inputs.extra-packages != ''
      shell: bash
      run: echo "::endgroup::"

    - name: Configure LLVM environment
      id: configure
      shell: bash
      run: |
        echo "::group::🔧 Configuring LLVM ${{ inputs.llvm-version }} environment"

        # Create symlinks
        sudo ln -sf "/usr/bin/clang-${{ inputs.llvm-version }}" /usr/bin/clang
        sudo ln -sf "/usr/bin/clang++-${{ inputs.llvm-version }}" /usr/bin/clang++
        sudo ln -sf "/usr/bin/lld-${{ inputs.llvm-version }}" /usr/bin/lld
        sudo ln -sf "/usr/bin/llvm-ar-${{ inputs.llvm-version }}" /usr/bin/llvm-ar
        sudo ln -sf "/usr/bin/llvm-ranlib-${{ inputs.llvm-version }}" /usr/bin/llvm-ranlib
        echo " ✓ Created symlinks"

        # Setup library paths
        LLVM_LIB_PATH="/usr/lib/llvm-${{ inputs.llvm-version }}/lib"
        if [ -d "$LLVM_LIB_PATH" ]; then
          echo "LD_LIBRARY_PATH=${LLVM_LIB_PATH}:${LD_LIBRARY_PATH:-}" >> $GITHUB_ENV
          echo "LIBCLANG_PATH=${LLVM_LIB_PATH}" >> $GITHUB_ENV

          echo "$LLVM_LIB_PATH" | sudo tee "/etc/ld.so.conf.d/llvm-${{ inputs.llvm-version }}.conf" > /dev/null
          sudo ldconfig
          echo " ✓ Configured library paths"
        else
          # Fallback to standard library location
          if [ -d "/usr/lib/x86_64-linux-gnu" ]; then
            echo "LIBCLANG_PATH=/usr/lib/x86_64-linux-gnu" >> $GITHUB_ENV
            echo " ✓ Using fallback library path"
          fi
        fi

        # Set output
        echo "version=${{ inputs.llvm-version }}" >> $GITHUB_OUTPUT
        echo "::endgroup::"
        echo "✅ LLVM ready: $(clang --version | head -1)"
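Once dpkg has the foreign architecture registered and the default sources are pinned to amd64 as above, APT can resolve :arch-qualified packages from the ports repositories alongside the host's own. A hedged example of what a later cross-compilation step could install; the package choices are illustrative, not taken from this diff:

    # Assumes the "Configure cross-compilation architecture" step ran with dpkg-arch: arm64.
    sudo apt-get update
    # Host cross-compiler plus an arm64 library pulled from ports.ubuntu.com:
    sudo apt-get install -y gcc-aarch64-linux-gnu libssl-dev:arm64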
.forgejo/actions/setup-rust/action.yml (new file, 239 lines)
@@ -0,0 +1,239 @@
name: setup-rust
description: |
  Set up Rust toolchain with sccache for compilation caching.
  Respects rust-toolchain.toml by default or accepts explicit version override.

inputs:
  cache-key-suffix:
    description: 'Optional suffix for cache keys (e.g. platform identifier)'
    required: false
    default: ''
  rust-components:
    description: 'Additional Rust components to install (space-separated)'
    required: false
    default: ''
  rust-target:
    description: 'Rust target triple (e.g. x86_64-unknown-linux-gnu)'
    required: false
    default: ''
  rust-version:
    description: 'Rust version to install (e.g. nightly). Defaults to 1.87.0'
    required: false
    default: '1.87.0'
  sccache-cache-limit:
    description: 'Maximum size limit for sccache local cache (e.g. 2G, 500M)'
    required: false
    default: '2G'
  github-token:
    description: 'GitHub token for downloading sccache from GitHub releases'
    required: false
    default: ''

outputs:
  rust-version:
    description: 'Installed Rust version'
    value: ${{ steps.rust-setup.outputs.version }}

runs:
  using: composite
  steps:
    - name: Detect runner OS
      id: runner-os
      uses: https://git.tomfos.tr/actions/detect-versions@v1

    - name: Configure Cargo environment
      shell: bash
      run: |
        # Use workspace-relative paths for better control and consistency
        echo "CARGO_HOME=${{ github.workspace }}/.cargo" >> $GITHUB_ENV
        echo "CARGO_TARGET_DIR=${{ github.workspace }}/target" >> $GITHUB_ENV
        echo "SCCACHE_DIR=${{ github.workspace }}/.sccache" >> $GITHUB_ENV
        echo "RUSTUP_HOME=${{ github.workspace }}/.rustup" >> $GITHUB_ENV

        # Limit binstall resolution timeout to avoid GitHub rate limit delays
        echo "BINSTALL_MAXIMUM_RESOLUTION_TIMEOUT=10" >> $GITHUB_ENV

        # Ensure directories exist for first run
        mkdir -p "${{ github.workspace }}/.cargo"
        mkdir -p "${{ github.workspace }}/.sccache"
        mkdir -p "${{ github.workspace }}/target"
        mkdir -p "${{ github.workspace }}/.rustup"

    - name: Start cache restore group
      shell: bash
      run: echo "::group::📦 Restoring caches (registry, toolchain, build artifacts)"

    - name: Cache Cargo registry and git
      id: registry-cache
      uses: actions/cache@v4
      with:
        path: |
          .cargo/registry/index
          .cargo/registry/cache
          .cargo/git/db
        # Registry cache saved per workflow, restored from any workflow's cache
        # Each workflow maintains its own registry that accumulates its needed crates
        key: continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ github.workflow }}
        restore-keys: |
          continuwuity-cargo-registry-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-

    - name: Cache toolchain binaries
      id: toolchain-cache
      uses: actions/cache@v4
      with:
        path: |
          .cargo/bin
          .rustup/toolchains
          .rustup/update-hashes
        # Shared toolchain cache across all Rust versions
        key: continuwuity-toolchain-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}

    - name: Setup sccache
      uses: https://git.tomfos.tr/tom/sccache-action@v1

    - name: Cache dependencies
      id: deps-cache
      uses: actions/cache@v4
      with:
        path: |
          target/**/.fingerprint
          target/**/deps
          target/**/*.d
          target/**/.cargo-lock
          target/**/CACHEDIR.TAG
          target/**/.rustc_info.json
          /timelord/
        # Dependencies cache - based on Cargo.lock, survives source code changes
        key: >-
          continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}
        restore-keys: |
          continuwuity-deps-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-

    - name: Cache incremental compilation
      id: incremental-cache
      uses: actions/cache@v4
      with:
        path: |
          target/**/incremental
        # Incremental cache - based on source code changes
        key: >-
          continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-${{ hashFiles('**/*.rs', '**/Cargo.toml') }}
        restore-keys: |
          continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-${{ hashFiles('rust-toolchain.toml', '**/Cargo.lock') }}-
          continuwuity-incremental-${{ steps.runner-os.outputs.slug }}-${{ steps.runner-os.outputs.arch }}-${{ inputs.rust-version }}${{ inputs.cache-key-suffix && format('-{0}', inputs.cache-key-suffix) || '' }}-

    - name: End cache restore group
      shell: bash
      run: echo "::endgroup::"

    - name: Setup Rust toolchain
      shell: bash
      run: |
        # Install rustup if not already cached
        if ! command -v rustup &> /dev/null; then
          echo "::group::📦 Installing rustup"
          curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain none
          source "$CARGO_HOME/env"
          echo "::endgroup::"
        else
          echo "✅ rustup already available"
        fi

        # Setup the appropriate Rust version
        if [[ -n "${{ inputs.rust-version }}" ]]; then
          echo "::group::📦 Setting up Rust ${{ inputs.rust-version }}"
          # Set override first to prevent rust-toolchain.toml from auto-installing
          rustup override set ${{ inputs.rust-version }} 2>/dev/null || true

          # Check if we need to install/update the toolchain
          if rustup toolchain list | grep -q "^${{ inputs.rust-version }}-"; then
            rustup update ${{ inputs.rust-version }}
          else
            rustup toolchain install ${{ inputs.rust-version }} --profile minimal -c cargo,clippy,rustfmt
          fi
        else
          echo "::group::📦 Setting up Rust from rust-toolchain.toml"
          rustup show
        fi
        echo "::endgroup::"

    - name: Configure PATH and install tools
      shell: bash
      env:
        GITHUB_TOKEN: ${{ inputs.github-token }}
      run: |
        # Add .cargo/bin to PATH permanently for all subsequent steps
        echo "${{ github.workspace }}/.cargo/bin" >> $GITHUB_PATH

        # For this step only, we need to add it to PATH since GITHUB_PATH takes effect in the next step
        export PATH="${{ github.workspace }}/.cargo/bin:$PATH"

        # Install cargo-binstall for fast binary installations
        if command -v cargo-binstall &> /dev/null; then
          echo "✅ cargo-binstall already available"
        else
          echo "::group::📦 Installing cargo-binstall"
          curl -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
          echo "::endgroup::"
        fi

        if command -v prek &> /dev/null; then
          echo "✅ prek already available"
        else
          echo "::group::📦 Installing prek"
          # prek isn't regularly published to crates.io, so we use git source
          cargo-binstall -y --no-symlinks --git https://github.com/j178/prek prek
          echo "::endgroup::"
        fi

        if command -v timelord &> /dev/null; then
          echo "✅ timelord already available"
        else
          echo "::group::📦 Installing timelord"
          cargo-binstall -y --no-symlinks timelord-cli
          echo "::endgroup::"
        fi

    - name: Configure sccache environment
      shell: bash
      run: |
        echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
        echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "CMAKE_CUDA_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
        echo "SCCACHE_GHA_ENABLED=true" >> $GITHUB_ENV

        # Configure incremental compilation GC
        # If we restored from old cache (partial hit), clean up aggressively
        if [[ "${{ steps.build-cache.outputs.cache-hit }}" != "true" ]]; then
          echo "♻️ Partial cache hit - enabling cache cleanup"
          echo "CARGO_INCREMENTAL_GC_THRESHOLD=5" >> $GITHUB_ENV
        fi

    - name: Install Rust components
      if: inputs.rust-components != ''
      shell: bash
      run: |
        echo "📦 Installing components: ${{ inputs.rust-components }}"
        rustup component add ${{ inputs.rust-components }}

    - name: Install Rust target
      if: inputs.rust-target != ''
      shell: bash
      run: |
        echo "📦 Installing target: ${{ inputs.rust-target }}"
        rustup target add ${{ inputs.rust-target }}

    - name: Output version and summary
      id: rust-setup
      shell: bash
      run: |
        RUST_VERSION=$(rustc --version | cut -d' ' -f2)
        echo "version=$RUST_VERSION" >> $GITHUB_OUTPUT

        echo "📋 Setup complete:"
        echo " Rust: $(rustc --version)"
        echo " Cargo: $(cargo --version)"
        echo " prek: $(prek --version 2>/dev/null || echo 'installed')"
        echo " timelord: $(timelord --version 2>/dev/null || echo 'installed')"
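The sccache wiring above (RUSTC_WRAPPER plus a workspace-local SCCACHE_DIR) can be reproduced in a local shell to confirm that rebuilds actually hit the cache; the paths mirror the action's choices but are otherwise illustrative:

    # Mirror the action's sccache configuration locally.
    export RUSTC_WRAPPER=sccache
    export SCCACHE_DIR="$PWD/.sccache"
    cargo build --release                    # first build populates the cache
    cargo clean && cargo build --release     # second build should be mostly cache hits
    sccache --show-stats                     # prints compile requests, hits, and misses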
@@ -1,46 +1,120 @@
 name: timelord
 description: |
-  Use timelord to set file timestamps
+  Use timelord to set file timestamps with git-warp-time fallback for cache misses
 inputs:
   key:
     description: |
       The key to use for caching the timelord data.
       This should be unique to the repository and the runner.
-    required: true
-    default: timelord-v0
+    required: false
+    default: ''
   path:
     description: |
       The path to the directory to be timestamped.
       This should be the root of the repository.
-    required: true
-    default: .
+    required: false
+    default: ''
+
+outputs:
+  database-path:
+    description: Path to timelord database
+    value: '${{ env.TIMELORD_CACHE_PATH }}'
 
 runs:
   using: composite
   steps:
-    - name: Cache timelord-cli installation
-      id: cache-timelord-bin
-      uses: actions/cache@v3
-      with:
-        path: ~/.cargo/bin/timelord
-        key: timelord-cli-v3.0.1
-    - name: Install timelord-cli
-      uses: https://github.com/cargo-bins/cargo-binstall@main
-      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
-    - run: cargo binstall timelord-cli@3.0.1
-      shell: bash
-      if: steps.cache-timelord-bin.outputs.cache-hit != 'true'
+    - name: Set defaults
+      shell: bash
+      run: |
+        echo "TIMELORD_KEY=${{ inputs.key || format('timelord-v1-{0}-{1}', github.repository, hashFiles('**/*.rs', '**/Cargo.toml', '**/Cargo.lock')) }}" >> $GITHUB_ENV
+        echo "TIMELORD_PATH=${{ inputs.path || '.' }}" >> $GITHUB_ENV
+        echo "TIMELORD_CACHE_PATH=$HOME/.cache/timelord" >> $GITHUB_ENV
+        echo "PATH=$HOME/.cargo/bin:/usr/share/rust/.cargo/bin:$PATH" >> $GITHUB_ENV
 
-    - name: Load timelord files
-      uses: actions/cache/restore@v3
+    - name: Restore binary cache
+      id: binary-cache
+      uses: actions/cache/restore@v4
       with:
-        path: /timelord/
-        key: ${{ inputs.key }}
-    - name: Run timelord to set timestamps
+        path: |
+          /usr/share/rust/.cargo/bin
+          ~/.cargo/bin
+        key: continuwuity-timelord-binaries
+
+    - name: Check if binaries need installation
       shell: bash
-      run: timelord sync --source-dir ${{ inputs.path }} --cache-dir /timelord/
-    - name: Save timelord
-      uses: actions/cache/save@v3
+      id: check-binaries
+      run: |
+        NEED_INSTALL=false
+
+        # Ensure ~/.cargo/bin exists
+        mkdir -p ~/.cargo/bin
+
+        # Check and move timelord if needed
+        if [ -f /usr/share/rust/.cargo/bin/timelord ] && [ ! -f ~/.cargo/bin/timelord ]; then
+          echo "Moving timelord from /usr/share/rust/.cargo/bin to ~/.cargo/bin"
+          mv /usr/share/rust/.cargo/bin/timelord ~/.cargo/bin/
+        fi
+        if [ ! -f ~/.cargo/bin/timelord ]; then
+          echo "timelord-cli not found, needs installation"
+          NEED_INSTALL=true
+        fi
+
+        # Check and move git-warp-time if needed
+        if [ -f /usr/share/rust/.cargo/bin/git-warp-time ] && [ ! -f ~/.cargo/bin/git-warp-time ]; then
+          echo "Moving git-warp-time from /usr/share/rust/.cargo/bin to ~/.cargo/bin"
+          mv /usr/share/rust/.cargo/bin/git-warp-time ~/.cargo/bin/
+        fi
+        if [ ! -f ~/.cargo/bin/git-warp-time ]; then
+          echo "git-warp-time not found, needs installation"
+          NEED_INSTALL=true
+        fi
+
+        echo "need-install=$NEED_INSTALL" >> $GITHUB_OUTPUT
+
+    - name: Install timelord-cli and git-warp-time
+      if: steps.check-binaries.outputs.need-install == 'true'
+      uses: https://github.com/taiki-e/install-action@v2
       with:
-        path: /timelord/
-        key: ${{ inputs.key }}
+        tool: git-warp-time,timelord-cli@3.0.1
+
+    - name: Save binary cache
+      if: steps.check-binaries.outputs.need-install == 'true'
+      uses: actions/cache/save@v4
+      with:
+        path: |
+          /usr/share/rust/.cargo/bin
+          ~/.cargo/bin
+        key: continuwuity-timelord-binaries
+
+    - name: Restore timelord cache with fallbacks
+      id: timelord-restore
+      uses: actions/cache/restore@v4
+      with:
+        path: ${{ env.TIMELORD_CACHE_PATH }}
+        key: ${{ env.TIMELORD_KEY }}
+        restore-keys: |
+          continuwuity-timelord-${{ github.repository }}-
+
+    - name: Initialize timestamps on complete cache miss
+      if: steps.timelord-restore.outputs.cache-hit != 'true'
+      shell: bash
+      run: |
+        echo "Complete timelord cache miss - running git-warp-time"
+        git fetch --unshallow
+        if [ "${{ env.TIMELORD_PATH }}" = "." ]; then
+          git-warp-time --quiet
+        else
+          git-warp-time --quiet ${{ env.TIMELORD_PATH }}
+        fi
+        echo "Git timestamps restored"
+
+    - name: Run timelord sync
+      shell: bash
+      run: |
+        mkdir -p ${{ env.TIMELORD_CACHE_PATH }}
+        timelord sync --source-dir ${{ env.TIMELORD_PATH }} --cache-dir ${{ env.TIMELORD_CACHE_PATH }}
+
+    - name: Save updated timelord cache immediately
+      uses: actions/cache/save@v4
+      with:
+        path: ${{ env.TIMELORD_CACHE_PATH }}
+        key: ${{ env.TIMELORD_KEY }}
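The rewritten action runs git-warp-time only on a complete cache miss, then hands off to timelord. The same two-step sequence can be replayed outside CI, assuming both binaries are installed (e.g. via the install-action step above):

    # On a cold cache: align file mtimes with their last git commit times.
    git fetch --unshallow 2>/dev/null || true   # no-op if the clone is already complete
    git-warp-time --quiet
    # Then record/restore timestamps across builds with timelord.
    mkdir -p ~/.cache/timelord
    timelord sync --source-dir . --cache-dir ~/.cache/timelord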
.forgejo/actions/upload-docker-artifacts/action.yml (new file, 70 lines)
@@ -0,0 +1,70 @@
name: upload-docker-artifacts
description: |
  Upload Docker build artifacts including binary and digest files.
  Handles artifact naming and conditional digest uploads for registry publishing.

inputs:
  slug:
    description: Platform slug for artifact naming (e.g. linux-amd64, linux-arm64)
    required: true
  cpu_suffix:
    description: CPU suffix for artifact naming (e.g. -haswell)
    required: false
    default: ""
  artifact_suffix:
    description: Suffix for binary artifacts (e.g. -maxperf)
    required: false
    default: ""
  digest_suffix:
    description: Suffix for digest artifacts (e.g. -maxperf)
    required: false
    default: ""
  digest:
    description: The digest of the built Docker image
    required: true

outputs:
  binary_artifact_name:
    description: The name of the uploaded binary artifact
    value: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}

runs:
  using: composite
  steps:
    - name: Export digest
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      shell: bash
      run: |
        mkdir -p /tmp/digests
        digest="${{ inputs.digest }}"
        echo "🔍 Build step digest output: '$digest'"
        if [[ -z "$digest" ]]; then
          echo "❌ ERROR: No digest found from build step"
          exit 1
        fi
        digest_file="/tmp/digests/${digest#sha256:}"
        echo "📁 Creating digest file: $digest_file"
        touch "$digest_file"
        echo "✅ Digest file created successfully"
        echo "📋 Contents of /tmp/digests:"
        ls -la /tmp/digests/

    - name: Rename extracted binary
      shell: bash
      run: mv /tmp/binaries/sbin/conduwuit /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}

    - name: Upload binary artifact
      uses: forgejo/upload-artifact@v4
      with:
        name: conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
        path: /tmp/binaries/conduwuit${{ inputs.cpu_suffix }}-${{ inputs.slug }}${{ inputs.artifact_suffix }}
        if-no-files-found: error

    - name: Upload digest
      if: ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' }}
      uses: forgejo/upload-artifact@v4
      with:
        name: digests${{ inputs.digest_suffix }}-${{ inputs.slug }}${{ inputs.cpu_suffix }}
        path: /tmp/digests/*
        if-no-files-found: error
        retention-days: 5
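The digest file name comes from bash prefix stripping: ${digest#sha256:} removes the literal sha256: prefix, leaving the bare hex digest as the artifact filename. A one-line illustration (the digest value is made up):

    digest="sha256:0123abcd"; echo "${digest#sha256:}"   # prints: 0123abcd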
.forgejo/workflows/build-debian.yml (new file, 148 lines)
@@ -0,0 +1,148 @@
name: Build / Debian DEB

concurrency:
  group: "build-debian-${{ forge.ref }}"
  cancel-in-progress: true

on:
  push:
    tags:
      - "v*.*.*"
  workflow_dispatch:
  schedule:
    - cron: '30 0 * * *'

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        container: ["ubuntu-latest", "ubuntu-previous", "debian-latest", "debian-oldstable"]
    container:
      image: "ghcr.io/tcpipuk/act-runner:${{ matrix.container }}"

    steps:
      - name: Get Debian version
        id: debian-version
        run: |
          VERSION=$(cat /etc/debian_version)
          DISTRIBUTION=$(lsb_release -sc 2>/dev/null)
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "distribution=$DISTRIBUTION" >> $GITHUB_OUTPUT
          echo "Debian distribution: $DISTRIBUTION ($VERSION)"

      - name: Checkout repository with full history
        uses: https://code.forgejo.org/actions/checkout@v5
        with:
          fetch-depth: 0

      - name: Cache Cargo registry
        uses: https://code.forgejo.org/actions/cache@v4
        with:
          path: |
            ~/.cargo/registry
            ~/.cargo/git
          key: cargo-debian-${{ steps.debian-version.outputs.distribution }}-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: |
            cargo-debian-${{ steps.debian-version.outputs.distribution }}-

      - name: Setup sccache
        uses: https://git.tomfos.tr/tom/sccache-action@v1

      - name: Configure sccache environment
        run: |
          echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
          echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
          echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
          echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
          # Aggressive GC since cache restores don't increment counter
          echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV

      - name: Setup Rust nightly
        uses: ./.forgejo/actions/setup-rust
        with:
          rust-version: nightly
          github-token: ${{ secrets.GH_PUBLIC_RO }}

      - name: Get package version and component
        id: package-meta
        run: |
          BASE_VERSION=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.name == \"conduwuit\").version" | sed 's/[^a-zA-Z0-9.+]/~/g')
          # VERSION is the package version, COMPONENT is used in
          # apt's repository config like a git repo branch
          if [[ "${{ forge.ref }}" == "refs/tags/"* ]]; then
            # Use the "stable" component for tagged releases
            COMPONENT="stable"
            VERSION=$BASE_VERSION
          else
            # Use the "dev" component for development builds
            SHA=$(echo "${{ forge.sha }}" | cut -c1-7)
            DATE=$(date +%Y%m%d)
            if [ "${{ forge.ref_name }}" = "main" ]; then
              COMPONENT="dev"
            else
              # Use the sanitized ref name as the component for feature branches
              COMPONENT="dev-$(echo '${{ forge.ref_name }}' | sed 's/[^a-zA-Z0-9.+]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)"
            fi
            CLEAN_COMPONENT=$(echo $COMPONENT | sed 's/[^a-zA-Z0-9.+]/~/g')
            VERSION="$BASE_VERSION~git$DATE.$SHA-$CLEAN_COMPONENT"
          fi
          echo "component=$COMPONENT" >> $GITHUB_OUTPUT
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "Component: $COMPONENT"
          echo "Version: $VERSION"

      - name: Install cargo-deb
        run: |
          if command -v cargo-deb &> /dev/null; then
            echo "cargo-deb already available"
          else
            echo "Installing cargo-deb"
            cargo-binstall -y --no-symlinks cargo-deb
          fi

      - name: Install build dependencies
        run: |
          apt-get update -y
          # Build dependencies for rocksdb
          apt-get install -y clang liburing-dev

      - name: Run cargo-deb
        id: cargo-deb
        run: |
          DEB_PATH=$(cargo deb --deb-version ${{ steps.package-meta.outputs.version }})
          echo "path=$DEB_PATH" >> $GITHUB_OUTPUT

      - name: Test deb installation
        run: |
          echo "Installing: ${{ steps.cargo-deb.outputs.path }}"

          apt-get install -y ${{ steps.cargo-deb.outputs.path }}

          dpkg -s continuwuity

          [ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
          [ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
          [ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"

      - name: Upload deb artifact
        uses: https://code.forgejo.org/actions/upload-artifact@v3
        with:
          name: continuwuity-${{ steps.debian-version.outputs.distribution }}
          path: ${{ steps.cargo-deb.outputs.path }}

      - name: Publish to Forgejo package registry
        if: ${{ forge.event_name == 'push' || forge.event_name == 'workflow_dispatch' || forge.event_name == 'schedule' }}
        run: |
          OWNER="continuwuation"
          DISTRIBUTION=${{ steps.debian-version.outputs.distribution }}
          COMPONENT=${{ steps.package-meta.outputs.component }}
          DEB=${{ steps.cargo-deb.outputs.path }}

          echo "Publishing: $DEB in component $COMPONENT for distribution $DISTRIBUTION"

          curl --fail-with-body \
            -X PUT \
            -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
            --upload-file "$DEB" \
            "${{ forge.server_url }}/api/packages/$OWNER/debian/pool/$DISTRIBUTION/$COMPONENT/upload"
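For non-tag builds, the version logic above composes a Debian version that dpkg orders before the eventual release: the ~ segment sorts lower than anything, including the empty string. A standalone sketch of the dev-branch case with illustrative values:

    # Reproduce the dev version string locally (BASE_VERSION is illustrative).
    BASE_VERSION="0.5.0"                 # from `cargo metadata`, disallowed chars mapped to ~
    SHA=$(git rev-parse HEAD | cut -c1-7)
    DATE=$(date +%Y%m%d)
    echo "${BASE_VERSION}~git${DATE}.${SHA}-dev"   # e.g. 0.5.0~git20250102.abc1234-dev
    # Verify the ordering claim with dpkg itself:
    dpkg --compare-versions "0.5.0~git20250102.abc1234-dev" lt "0.5.0" && echo "sorts before release"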
389
.forgejo/workflows/build-fedora.yml
Normal file
389
.forgejo/workflows/build-fedora.yml
Normal file
@@ -0,0 +1,389 @@
|
||||
name: Build / Fedora RPM
|
||||
|
||||
concurrency:
|
||||
group: "build-fedora-${{ github.ref }}"
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*.*.*"
|
||||
# paths:
|
||||
# - 'pkg/fedora/**'
|
||||
# - 'src/**'
|
||||
# - 'Cargo.toml'
|
||||
# - 'Cargo.lock'
|
||||
# - '.forgejo/workflows/build-fedora.yml'
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: '30 0 * * *'
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: fedora-latest
|
||||
steps:
|
||||
- name: Detect Fedora version
|
||||
id: fedora
|
||||
run: |
|
||||
VERSION=$(rpm -E %fedora)
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "Fedora version: $VERSION"
|
||||
|
||||
- name: Checkout repository with full history
|
||||
uses: https://code.forgejo.org/actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
- name: Cache DNF packages
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
/var/cache/dnf
|
||||
/var/cache/yum
|
||||
key: dnf-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('pkg/fedora/continuwuity.spec.rpkg') }}-v1
|
||||
restore-keys: |
|
||||
dnf-fedora${{ steps.fedora.outputs.version }}-
|
||||
|
||||
- name: Cache Cargo registry
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
key: cargo-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
cargo-fedora${{ steps.fedora.outputs.version }}-
|
||||
|
||||
- name: Cache Rust build dependencies
|
||||
uses: https://code.forgejo.org/actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/rpmbuild/BUILD/*/target/release/deps
|
||||
~/rpmbuild/BUILD/*/target/release/build
|
||||
~/rpmbuild/BUILD/*/target/release/.fingerprint
|
||||
~/rpmbuild/BUILD/*/target/release/incremental
|
||||
key: rust-deps-fedora${{ steps.fedora.outputs.version }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
rust-deps-fedora${{ steps.fedora.outputs.version }}-
|
||||
|
||||
- name: Setup sccache
|
||||
uses: https://git.tomfos.tr/tom/sccache-action@v1
|
||||
|
||||
- name: Configure sccache environment
|
||||
run: |
|
||||
echo "RUSTC_WRAPPER=sccache" >> $GITHUB_ENV
|
||||
echo "CMAKE_C_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
|
||||
echo "CMAKE_CXX_COMPILER_LAUNCHER=sccache" >> $GITHUB_ENV
|
||||
echo "SCCACHE_CACHE_SIZE=10G" >> $GITHUB_ENV
|
||||
# Aggressive GC since cache restores don't increment counter
|
||||
echo "CARGO_INCREMENTAL_GC_TRIGGER=5" >> $GITHUB_ENV
|
||||
|
||||
- name: Install base RPM tools
|
||||
run: |
|
||||
dnf install -y --setopt=keepcache=1 \
|
||||
fedora-packager \
|
||||
python3-pip \
|
||||
rpm-sign \
|
||||
rpkg \
|
||||
wget
|
||||
|
||||
- name: Setup build environment and build SRPM
|
||||
run: |
|
||||
git config --global --add safe.directory "$GITHUB_WORKSPACE"
|
||||
git config --global user.email "ci@continuwuity.org"
|
||||
git config --global user.name "Continuwuity"
|
||||
|
||||
rpmdev-setuptree
|
||||
|
||||
cd "$GITHUB_WORKSPACE"
|
||||
|
||||
# Determine release suffix and version based on ref type and branch
|
||||
if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
|
||||
# Tags get clean version numbers for stable releases
|
||||
RELEASE_SUFFIX=""
|
||||
TAG_NAME="${{ github.ref_name }}"
|
||||
# Extract version from tag (remove v prefix if present)
|
||||
TAG_VERSION=$(echo "$TAG_NAME" | sed 's/^v//')
|
||||
|
||||
# Create spec file with tag version
|
||||
sed -e "s/^Version:.*$/Version: $TAG_VERSION/" \
|
||||
-e "s/^Release:.*$/Release: 1%{?dist}/" \
|
||||
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
|
||||
elif [ "${{ github.ref_name }}" = "main" ]; then
|
||||
# Main branch gets .dev suffix
|
||||
RELEASE_SUFFIX=".dev"
|
||||
|
||||
# Replace the Release line to include our suffix
|
||||
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
|
||||
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
|
||||
else
|
||||
# Other branches get sanitized branch name as suffix
|
||||
SAFE_BRANCH=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/_/g' | cut -c1-20)
|
||||
RELEASE_SUFFIX=".${SAFE_BRANCH}"
|
||||
|
||||
# Replace the Release line to include our suffix
|
||||
sed "s/^Release:.*$/Release: 1${RELEASE_SUFFIX}%{?dist}/" \
|
||||
pkg/fedora/continuwuity.spec.rpkg > continuwuity.spec.rpkg
|
||||
fi
|
||||
|
||||
rpkg srpm --outdir "$HOME/rpmbuild/SRPMS"
|
||||
|
||||
ls -la $HOME/rpmbuild/SRPMS/
|
||||
|
||||
|
||||
- name: Install build dependencies from SRPM
|
||||
run: |
|
||||
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
|
||||
|
||||
if [ -z "$SRPM" ]; then
|
||||
echo "Error: No SRPM file found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Installing build dependencies from: $(basename $SRPM)"
|
||||
dnf builddep -y "$SRPM"
|
||||
|
||||
- name: Build RPM from SRPM
|
||||
run: |
|
||||
SRPM=$(find "$HOME/rpmbuild/SRPMS" -name "*.src.rpm" | head -1)
|
||||
|
||||
if [ -z "$SRPM" ]; then
|
||||
echo "Error: No SRPM file found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Building from SRPM: $SRPM"
|
||||
|
||||
rpmbuild --rebuild "$SRPM" \
|
||||
--define "_topdir $HOME/rpmbuild" \
|
||||
--define "_sourcedir $GITHUB_WORKSPACE" \
|
||||
--nocheck # Skip %check section to avoid test dependencies
|
||||
|
||||
|
||||
- name: Test RPM installation
|
||||
run: |
|
||||
# Find the main binary RPM (exclude debug and source RPMs)
|
||||
RPM=$(find "$HOME/rpmbuild/RPMS" -name "continuwuity-*.rpm" \
|
||||
! -name "*debuginfo*" \
|
||||
! -name "*debugsource*" \
|
||||
! -name "*.src.rpm" | head -1)
|
||||
|
||||
if [ -z "$RPM" ]; then
|
||||
echo "Error: No binary RPM file found"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Testing installation of: $RPM"
|
||||
|
||||
# Dry run first
|
||||
rpm -qpi "$RPM"
|
||||
echo ""
|
||||
rpm -qpl "$RPM"
|
||||
|
||||
# Actually install it
|
||||
dnf install -y "$RPM"
|
||||
|
||||
# Verify installation
|
||||
rpm -qa | grep continuwuity
|
||||
|
||||
# Check that the binary exists
|
||||
[ -f /usr/bin/conduwuit ] && echo "✅ Binary installed successfully"
|
||||
[ -f /usr/lib/systemd/system/conduwuit.service ] && echo "✅ Systemd service installed"
|
||||
[ -f /etc/conduwuit/conduwuit.toml ] && echo "✅ Config file installed"
|
||||
|
||||
- name: List built packages
|
||||
run: |
|
||||
echo "Binary RPMs:"
|
||||
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec ls -la {} \;
|
||||
|
||||
echo ""
|
||||
echo "Source RPMs:"
|
||||
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec ls -la {} \;
|
||||
|
||||
- name: Collect artifacts
|
||||
run: |
|
||||
mkdir -p artifacts
|
||||
|
||||
find "$HOME/rpmbuild/RPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
|
||||
find "$HOME/rpmbuild/SRPMS" -name "*.rpm" -type f -exec cp {} artifacts/ \;
|
||||
|
||||
cd artifacts
|
||||
echo "Build Information:" > BUILD_INFO.txt
|
||||
echo "==================" >> BUILD_INFO.txt
|
||||
echo "Git commit: ${{ github.sha }}" >> BUILD_INFO.txt
|
||||
echo "Git branch: ${{ github.ref_name }}" >> BUILD_INFO.txt
|
||||
echo "Build date: $(date -u +%Y-%m-%d_%H:%M:%S_UTC)" >> BUILD_INFO.txt
|
||||
echo "" >> BUILD_INFO.txt
|
||||
echo "Package contents:" >> BUILD_INFO.txt
|
||||
echo "-----------------" >> BUILD_INFO.txt
|
||||
for rpm in *.rpm; do
|
||||
echo "" >> BUILD_INFO.txt
|
||||
echo "File: $rpm" >> BUILD_INFO.txt
|
||||
rpm -qpi "$rpm" 2>/dev/null | grep -E "^(Name|Version|Release|Architecture|Size)" >> BUILD_INFO.txt
|
||||
done
|
||||
|
||||
ls -la
|
||||
|
      - name: Upload binary RPM artifact
        run: |
          # Find the main binary RPM (exclude debug and source RPMs)
          BIN_RPM=$(find artifacts -name "continuwuity-*.rpm" \
            ! -name "*debuginfo*" \
            ! -name "*debugsource*" \
            ! -name "*.src.rpm" \
            -type f)

          mkdir -p upload-bin
          cp $BIN_RPM upload-bin/

      - name: Upload binary RPM
        uses: https://code.forgejo.org/actions/upload-artifact@v3
        with:
          name: continuwuity
          path: upload-bin/

      - name: Upload debug RPM artifact
        uses: https://code.forgejo.org/actions/upload-artifact@v3
        with:
          name: continuwuity-debug
          path: artifacts/*debuginfo*.rpm

      - name: Publish to RPM Package Registry
        if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule' }}
        run: |
          # Find the main binary RPM (exclude debug and source RPMs)
          RPM=$(find artifacts -name "continuwuity-*.rpm" \
            ! -name "*debuginfo*" \
            ! -name "*debugsource*" \
            ! -name "*.src.rpm" \
            -type f | head -1)

          if [ -z "$RPM" ]; then
            echo "No binary RPM found to publish"
            exit 0
          fi

          RPM_BASENAME=$(basename "$RPM")
          echo "Publishing: $RPM_BASENAME"

          # Determine the group based on ref type and branch
          if [[ "${{ github.ref }}" == "refs/tags/"* ]]; then
            GROUP="stable"
            # For tags, extract the tag name for version info
            TAG_NAME="${{ github.ref_name }}"
          elif [ "${{ github.ref_name }}" = "main" ]; then
            GROUP="dev"
          else
            # Use sanitized branch name as group for feature branches
            GROUP=$(echo "${{ github.ref_name }}" | sed 's/[^a-zA-Z0-9]/-/g' | tr '[:upper:]' '[:lower:]' | cut -c1-30)
          fi

          PACKAGE_INFO=$(rpm -qpi "$RPM" 2>/dev/null)
          PACKAGE_NAME=$(echo "$PACKAGE_INFO" | grep "^Name" | awk '{print $3}')
          PACKAGE_VERSION=$(echo "$PACKAGE_INFO" | grep "^Version" | awk '{print $3}')
          PACKAGE_RELEASE=$(echo "$PACKAGE_INFO" | grep "^Release" | awk '{print $3}')
          PACKAGE_ARCH=$(echo "$PACKAGE_INFO" | grep "^Architecture" | awk '{print $2}')

          # Full version includes release
          FULL_VERSION="${PACKAGE_VERSION}-${PACKAGE_RELEASE}"

          # Forgejo's RPM registry cannot overwrite existing packages, so we must delete first
          # 404 is OK if package doesn't exist yet
          echo "Removing any existing package: $PACKAGE_NAME-$FULL_VERSION.$PACKAGE_ARCH"
          RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
            -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
            "https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/package/$PACKAGE_NAME/$FULL_VERSION/$PACKAGE_ARCH")
          HTTP_CODE=$(echo "$RESPONSE" | tail -n1)

          if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
            echo "ERROR: Failed to delete package (HTTP $HTTP_CODE)"
            echo "$RESPONSE" | head -n -1
            exit 1
          fi

          curl --fail-with-body \
            -X PUT \
            -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
            -H "Content-Type: application/x-rpm" \
            -T "$RPM" \
            "https://forgejo.ellis.link/api/packages/continuwuation/rpm/$GROUP/upload?sign=true"

          echo ""
          echo "✅ Published binary RPM to: https://forgejo.ellis.link/continuwuation/-/packages/rpm/continuwuity/"
          echo "Group: $GROUP"

          # Upload debug RPMs to separate group
          DEBUG_RPMS=$(find artifacts -name "*debuginfo*.rpm")
          if [ -n "$DEBUG_RPMS" ]; then
            echo ""
            echo "Publishing debug RPMs to group: ${GROUP}-debug"

            for DEBUG_RPM in $DEBUG_RPMS; do
              echo "Publishing: $(basename "$DEBUG_RPM")"

              DEBUG_INFO=$(rpm -qpi "$DEBUG_RPM" 2>/dev/null)
              DEBUG_NAME=$(echo "$DEBUG_INFO" | grep "^Name" | awk '{print $3}')
              DEBUG_VERSION=$(echo "$DEBUG_INFO" | grep "^Version" | awk '{print $3}')
              DEBUG_RELEASE=$(echo "$DEBUG_INFO" | grep "^Release" | awk '{print $3}')
              DEBUG_ARCH=$(echo "$DEBUG_INFO" | grep "^Architecture" | awk '{print $2}')
              DEBUG_FULL_VERSION="${DEBUG_VERSION}-${DEBUG_RELEASE}"

              # Must delete existing package first (Forgejo limitation)
              RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
                -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
                "https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/package/$DEBUG_NAME/$DEBUG_FULL_VERSION/$DEBUG_ARCH")
              HTTP_CODE=$(echo "$RESPONSE" | tail -n1)

              if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
                echo "ERROR: Failed to delete debug package (HTTP $HTTP_CODE)"
                echo "$RESPONSE" | head -n -1
                exit 1
              fi

              curl --fail-with-body \
                -X PUT \
                -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
                -H "Content-Type: application/x-rpm" \
                -T "$DEBUG_RPM" \
                "https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-debug/upload?sign=true"
            done

            echo "✅ Published debug RPMs to group: ${GROUP}-debug"
          fi

          # Also upload the SRPM to separate group
          SRPM=$(find artifacts -name "*.src.rpm" | head -1)
          if [ -n "$SRPM" ]; then
            echo ""
            echo "Publishing source RPM: $(basename "$SRPM")"
            echo "Publishing to group: ${GROUP}-src"

            SRPM_INFO=$(rpm -qpi "$SRPM" 2>/dev/null)
            SRPM_NAME=$(echo "$SRPM_INFO" | grep "^Name" | awk '{print $3}')
            SRPM_VERSION=$(echo "$SRPM_INFO" | grep "^Version" | awk '{print $3}')
            SRPM_RELEASE=$(echo "$SRPM_INFO" | grep "^Release" | awk '{print $3}')
            SRPM_FULL_VERSION="${SRPM_VERSION}-${SRPM_RELEASE}"

            # Must delete existing SRPM first (Forgejo limitation)
            echo "Removing any existing SRPM: $SRPM_NAME-$SRPM_FULL_VERSION.src"
            RESPONSE=$(curl -s -w "\n%{http_code}" -X DELETE \
              -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
              "https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/package/$SRPM_NAME/$SRPM_FULL_VERSION/src")
            HTTP_CODE=$(echo "$RESPONSE" | tail -n1)

            if [ "$HTTP_CODE" != "204" ] && [ "$HTTP_CODE" != "404" ]; then
              echo "ERROR: Failed to delete SRPM (HTTP $HTTP_CODE)"
              echo "$RESPONSE" | head -n -1
              exit 1
            fi

            curl --fail-with-body \
              -X PUT \
              -H "Authorization: token ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}" \
              -H "Content-Type: application/x-rpm" \
              -T "$SRPM" \
              "https://forgejo.ellis.link/api/packages/continuwuation/rpm/${GROUP}-src/upload?sign=true"

            echo "✅ Published source RPM to group: ${GROUP}-src"
          fi
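Usage sketch (not part of the diff): on a consumer machine, a group published by the step above can be enabled through Forgejo's RPM registry .repo endpoint. The exact URL shape is an assumption based on Forgejo's packages API together with the continuwuation owner and the "dev" group this workflow uses for main:

    # Enable the dev group and install the server (run as root on Fedora/RHEL)
    dnf config-manager --add-repo \
      https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev.repo
    dnf install -y continuwuity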
@@ -21,7 +21,7 @@ jobs:

    steps:
      - name: Sync repository
-       uses: https://github.com/actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          persist-credentials: false
          fetch-depth: 0
@@ -49,10 +49,21 @@ jobs:
          cp ./docs/static/_headers ./public/_headers
          echo "Copied .well-known files and _headers to ./public"

+     - name: Detect runner environment
+       id: runner-env
+       uses: https://git.tomfos.tr/actions/detect-versions@v1
+
      - name: Setup Node.js
-       uses: https://github.com/actions/setup-node@v4
+       if: steps.runner-env.outputs.node_major == '' || steps.runner-env.outputs.node_major < '20'
+       uses: https://github.com/actions/setup-node@v5
        with:
-         node-version: 20
+         node-version: 22

+     - name: Cache npm dependencies
+       uses: actions/cache@v3
+       with:
+         path: ~/.npm
+         key: continuwuity-${{ steps.runner-env.outputs.slug }}-${{ steps.runner-env.outputs.arch }}-node-${{ steps.runner-env.outputs.node_version }}
+
      - name: Install dependencies
        run: npm install --save-dev wrangler@latest
@@ -4,6 +4,14 @@ on:
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch:
+ pull_request:
+   paths:
+     - ".forgejo/workflows/element.yml"
+ push:
+   branches:
+     - main
+   paths:
+     - ".forgejo/workflows/element.yml"

concurrency:
  group: "element-${{ github.ref }}"
@@ -16,7 +24,7 @@ jobs:

    steps:
      - name: 📦 Setup Node.js
-       uses: https://github.com/actions/setup-node@v4
+       uses: https://github.com/actions/setup-node@v5
        with:
          node-version: "22"

@@ -101,7 +109,7 @@ jobs:
          cat ./element-web/webapp/config.json

      - name: 📤 Upload Artifact
-       uses: https://code.forgejo.org/actions/upload-artifact@v3
+       uses: forgejo/upload-artifact@v4
        with:
          name: element-web
          path: ./element-web/webapp/
@@ -26,7 +26,7 @@ jobs:
      GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }}
    steps:
      - name: Checkout repository
-       uses: actions/checkout@v4
+       uses: actions/checkout@v5
        with:
          persist-credentials: false
@@ -1,22 +0,0 @@
name: Checks / Prefligit

on:
  push:
  pull_request:
permissions:
  contents: read

jobs:
  prefligit:
    runs-on: ubuntu-latest
    env:
      FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }}
      TO_REF: ${{ github.sha }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false
      - uses: ./.forgejo/actions/prefligit
        with:
          extra_args: --all-files --hook-stage manual
83  .forgejo/workflows/prek-checks.yml  (new file)
@@ -0,0 +1,83 @@
name: Checks / Prek

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

permissions:
  contents: read

jobs:
  fast-checks:
    name: Pre-commit & Formatting
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        with:
          persist-credentials: false

      - name: Setup Rust nightly
        uses: ./.forgejo/actions/setup-rust
        with:
          rust-version: nightly
          github-token: ${{ secrets.GH_PUBLIC_RO }}

      - name: Run prek
        run: |
          prek run \
            --all-files \
            --hook-stage manual \
            --show-diff-on-failure \
            --color=always \
            -v

      - name: Check Rust formatting
        run: |
          cargo +nightly fmt --all -- --check && \
            echo "✅ Formatting check passed" || \
            exit 1

  clippy-and-tests:
    name: Clippy and Cargo Tests
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        with:
          persist-credentials: false

      - name: Setup LLVM
        uses: ./.forgejo/actions/setup-llvm-with-apt
        with:
          extra-packages: liburing-dev liburing2

      - name: Setup Rust with caching
        uses: ./.forgejo/actions/setup-rust
        with:
          github-token: ${{ secrets.GH_PUBLIC_RO }}

      - name: Run Clippy lints
        run: |
          cargo clippy \
            --workspace \
            --features full \
            --locked \
            --no-deps \
            --profile test \
            -- \
            -D warnings

      - name: Run Cargo tests
        run: |
          cargo test \
            --workspace \
            --features full \
            --locked \
            --profile test \
            --all-targets \
            --no-fail-fast
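For local parity (not part of the diff), the two jobs above reduce to four commands that can be run directly in a checkout; this assumes prek and a nightly toolchain are installed locally:

    prek run --all-files --hook-stage manual --show-diff-on-failure
    cargo +nightly fmt --all -- --check
    cargo clippy --workspace --features full --locked --no-deps --profile test -- -D warnings
    cargo test --workspace --features full --locked --profile test --all-targets --no-fail-fast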
@@ -3,62 +3,40 @@ concurrency:
  group: "release-image-${{ github.ref }}"

on:
  push:
  pull_request:
    paths-ignore:
      - "*.md"
      - "**/*.md"
      - ".gitlab-ci.yml"
      - ".gitignore"
      - "renovate.json"
      - "debian/**"
      - "docker/**"
      - "pkg/**"
      - "docs/**"
  push:
    branches:
      - main
    paths-ignore:
      - "*.md"
      - "**/*.md"
      - ".gitlab-ci.yml"
      - ".gitignore"
      - "renovate.json"
      - "pkg/**"
      - "docs/**"
    tags:
      - "v*.*.*"
  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

env:
  BUILTIN_REGISTRY: forgejo.ellis.link
  BUILTIN_REGISTRY_ENABLED: "${{ ((vars.BUILTIN_REGISTRY_USER && secrets.BUILTIN_REGISTRY_PASSWORD) || (github.event_name != 'pull_request' || github.event.pull_request.head.repo.fork == false)) && 'true' || 'false' }}"
  IMAGE_PATH: forgejo.ellis.link/continuwuation/continuwuity

jobs:
  define-variables:
    runs-on: ubuntu-latest

    outputs:
      images: ${{ steps.var.outputs.images }}
      images_list: ${{ steps.var.outputs.images_list }}
      build_matrix: ${{ steps.var.outputs.build_matrix }}

    steps:
      - name: Setting variables
        uses: https://github.com/actions/github-script@v7
        id: var
        with:
          script: |
            const githubRepo = '${{ github.repository }}'.toLowerCase()
            const repoId = githubRepo.split('/')[1]

            core.setOutput('github_repository', githubRepo)
            const builtinImage = '${{ env.BUILTIN_REGISTRY }}/' + githubRepo
            let images = []
            if (process.env.BUILTIN_REGISTRY_ENABLED === "true") {
              images.push(builtinImage)
            }
            core.setOutput('images', images.join("\n"))
            core.setOutput('images_list', images.join(","))
            const platforms = ['linux/amd64', 'linux/arm64']
            core.setOutput('build_matrix', JSON.stringify({
              platform: platforms,
              target_cpu: ['base'],
              include: platforms.map(platform => { return {
                platform,
                slug: platform.replace('/', '-')
              }})
            }))
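For reference (derived from the script above, not part of the diff), the build_matrix output serializes to this JSON, which is what fromJSON() hands to the consuming job's strategy:

    {
      "platform": ["linux/amd64", "linux/arm64"],
      "target_cpu": ["base"],
      "include": [
        { "platform": "linux/amd64", "slug": "linux-amd64" },
        { "platform": "linux/arm64", "slug": "linux-arm64" }
      ]
    }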
  build-image:
  build-release:
    name: "Build ${{ matrix.slug }} (release)"
    runs-on: dind
    needs: define-variables
    permissions:
      contents: read
      packages: write
@@ -66,116 +44,28 @@ jobs:
      id-token: write
    strategy:
      matrix:
        {
          "target_cpu": ["base"],
          "profile": ["release"],
          "include":
            [
              { "platform": "linux/amd64", "slug": "linux-amd64" },
              { "platform": "linux/arm64", "slug": "linux-arm64" },
            ],
          "platform": ["linux/amd64", "linux/arm64"],
        }
        include:
          - platform: "linux/amd64"
            slug: "linux-amd64"
          - platform: "linux/arm64"
            slug: "linux-arm64"

    steps:
      - name: Echo strategy
        run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}'
      - name: Echo matrix
        run: echo '${{ toJSON(matrix) }}'

      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Install rust
        id: rust-toolchain
        uses: ./.forgejo/actions/rust-toolchain

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
        uses: docker/login-action@v3
      - name: Prepare Docker build environment
        id: prepare
        uses: ./.forgejo/actions/prepare-docker-build
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}
          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

      # This step uses [docker/metadata-action](https://github.com/docker/metadata-action#about) to extract tags and labels that will be applied to the specified image. The `id` "meta" allows the output of this step to be referenced in a subsequent step. The `images` value provides the base name for the tags and labels.
      - name: Extract metadata (labels, annotations) for Docker
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{needs.define-variables.outputs.images}}
          # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: manifest,index

      # This step uses the `docker/build-push-action` action to build the image, based on your repository's `Dockerfile`. If the build succeeds, it pushes the image to GitHub Packages.
      # It uses the `context` parameter to define the build's context as the set of files located in the specified path. For more information, see "[Usage](https://github.com/docker/build-push-action#usage)" in the README of the `docker/build-push-action` repository.
      # It uses the `tags` and `labels` parameters to tag and label the image with the output from the "meta" step.
      # It will not push images generated from a pull request
      - name: Get short git commit SHA
        id: sha
        run: |
          calculatedSha=$(git rev-parse --short ${{ github.sha }})
          echo "COMMIT_SHORT_SHA=$calculatedSha" >> $GITHUB_ENV
      - name: Get Git commit timestamps
        run: echo "TIMESTAMP=$(git log -1 --pretty=%ct)" >> $GITHUB_ENV

      - uses: ./.forgejo/actions/timelord
        with:
          key: timelord-v0
          path: .

      - name: Cache Rust registry
        uses: actions/cache@v3
        with:
          path: |
            .cargo/git
            .cargo/git/checkouts
            .cargo/registry
            .cargo/registry/src
          key: rust-registry-image-${{hashFiles('**/Cargo.lock') }}
      - name: Cache cargo target
        id: cache-cargo-target
        uses: actions/cache@v3
        with:
          path: |
            cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}}
      - name: Cache apt cache
        id: cache-apt
        uses: actions/cache@v3
        with:
          path: |
            var-cache-apt-${{ matrix.slug }}
          key: var-cache-apt-${{ matrix.slug }}
      - name: Cache apt lib
        id: cache-apt-lib
        uses: actions/cache@v3
        with:
          path: |
            var-lib-apt-${{ matrix.slug }}
          key: var-lib-apt-${{ matrix.slug }}
      - name: inject cache into docker
        uses: https://github.com/reproducible-containers/buildkit-cache-dance@v3.1.0
        with:
          cache-map: |
            {
              ".cargo/registry": "/usr/local/cargo/registry",
              ".cargo/git/db": "/usr/local/cargo/git/db",
              "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}": {
                "target": "/app/target",
                "id": "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}"
              },
              "var-cache-apt-${{ matrix.slug }}": "/var/cache/apt",
              "var-lib-apt-${{ matrix.slug }}": "/var/lib/apt"
            }
          skip-extraction: ${{ steps.cache.outputs.cache-hit }}

          platform: ${{ matrix.platform }}
          slug: ${{ matrix.slug }}
          target_cpu: ""
          profile: "release"
          images: ${{ env.IMAGE_PATH }}
          registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
      - name: Build and push Docker image by digest
        id: build
        uses: docker/build-push-action@v6
@@ -183,114 +73,134 @@ jobs:
        with:
          context: .
          file: "docker/Dockerfile"
          build-args: |
            GIT_COMMIT_HASH=${{ github.sha }})
            GIT_COMMIT_HASH=${{ github.sha }}
            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
            GIT_REMOTE_URL=${{github.event.repository.html_url }}
            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
            CARGO_INCREMENTAL=${{ env.BUILDKIT_ENDPOINT != '' && '1' || '0' }}
            TARGET_CPU=
            RUST_PROFILE=release
          platforms: ${{ matrix.platform }}
          labels: ${{ steps.meta.outputs.labels }}
          annotations: ${{ steps.meta.outputs.annotations }}
          labels: ${{ steps.prepare.outputs.metadata_labels }}
          annotations: ${{ steps.prepare.outputs.metadata_annotations }}
          cache-from: type=gha
          # cache-to: type=gha,mode=max
          sbom: true
          outputs: type=image,"name=${{ needs.define-variables.outputs.images_list }}",push-by-digest=true,name-canonical=true,push=true
          outputs: |
            ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', env.IMAGE_PATH) || format('type=image,"name={0}",push=false', env.IMAGE_PATH) }}
            type=local,dest=/tmp/binaries
        env:
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}

      # For publishing multi-platform manifests
      - name: Export digest
        run: |
          mkdir -p /tmp/digests
          digest="${{ steps.build.outputs.digest }}"
          touch "/tmp/digests/${digest#sha256:}"

      - name: Extract binary from container (image)
        id: extract-binary-image
        run: |
          mkdir -p /tmp/binaries
          digest="${{ steps.build.outputs.digest }}"
          echo "container_id=$(docker create --platform ${{ matrix.platform }} ${{ needs.define-variables.outputs.images_list }}@$digest)" >> $GITHUB_OUTPUT
      - name: Extract binary from container (copy)
        run: docker cp ${{ steps.extract-binary-image.outputs.container_id }}:/sbin/conduwuit /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
      - name: Extract binary from container (cleanup)
        run: docker rm ${{ steps.extract-binary-image.outputs.container_id }}

      - name: Upload binary artifact
        uses: forgejo/upload-artifact@v4
      - name: Upload Docker artifacts
        uses: ./.forgejo/actions/upload-docker-artifacts
        with:
          name: conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          path: /tmp/binaries/conduwuit-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}
          if-no-files-found: error
          slug: ${{ matrix.slug }}
          cpu_suffix: ${{ steps.prepare.outputs.cpu_suffix }}
          artifact_suffix: ""
          digest_suffix: ""
          digest: ${{ steps.build.outputs.digest }}

      - name: Upload digest
        uses: forgejo/upload-artifact@v4
        with:
          name: digests-${{ matrix.slug }}
          path: /tmp/digests/*
          if-no-files-found: error
          retention-days: 5

  merge:
  merge-release:
    name: "Create Multi-arch Release Manifest"
    runs-on: dind
    needs: [define-variables, build-image]
    needs: build-release
    steps:
      - name: Download digests
        uses: forgejo/download-artifact@v4
      - name: Checkout repository
        uses: actions/checkout@v5
        with:
          path: /tmp/digests
          pattern: digests-*
          merge-multiple: true
      # Uses the `docker/login-action` action to log in to the Container registry using the account and password that will publish the packages. Once published, the packages are scoped to the account defined here.
      - name: Login to builtin registry
        uses: docker/login-action@v3
          persist-credentials: false
      - name: Create multi-platform manifest
        uses: ./.forgejo/actions/create-docker-manifest
        with:
          registry: ${{ env.BUILTIN_REGISTRY }}
          username: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
          digest_pattern: "digests-linux-{amd64,arm64}"
          tag_suffix: ""
          images: ${{ env.IMAGE_PATH }}
          registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
  build-maxperf:
    name: "Build ${{ matrix.slug }} (max-perf)"
    runs-on: dind
    needs: build-release
    permissions:
      contents: read
      packages: write
      attestations: write
      id-token: write
    strategy:
      matrix:
        include:
          - platform: "linux/amd64"
            slug: "linux-amd64"
            target_cpu: "haswell"
          - platform: "linux/arm64"
            slug: "linux-arm64"
            target_cpu: ""

      - name: Extract metadata (tags) for Docker
        id: meta
        uses: docker/metadata-action@v5
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        with:
          tags: |
            type=semver,pattern={{version}},prefix=v
            type=semver,pattern={{major}}.{{minor}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.0.') }},prefix=v
            type=semver,pattern={{major}},enable=${{ !startsWith(github.ref, 'refs/tags/v0.') }},prefix=v
            type=ref,event=branch,prefix=${{ format('refs/heads/{0}', github.event.repository.default_branch) != github.ref && 'branch-' || '' }}
            type=ref,event=pr
            type=sha,format=long
            type=raw,value=latest,enable=${{ startsWith(github.ref, 'refs/tags/v') }}
          images: ${{needs.define-variables.outputs.images}}
          # default labels & annotations: https://github.com/docker/metadata-action/blob/master/src/meta.ts#L509
          persist-credentials: false
      - name: Prepare max-perf Docker build environment
        id: prepare
        uses: ./.forgejo/actions/prepare-docker-build
        with:
          platform: ${{ matrix.platform }}
          slug: ${{ matrix.slug }}
          target_cpu: ${{ matrix.target_cpu }}
          profile: "release-max-perf"
          images: ${{ env.IMAGE_PATH }}
          registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
      - name: Build and push max-perf Docker image by digest
        id: build
        uses: docker/build-push-action@v6
        with:
          context: .
          file: "docker/Dockerfile"
          build-args: |
            GIT_COMMIT_HASH=${{ github.sha }}
            GIT_COMMIT_HASH_SHORT=${{ env.COMMIT_SHORT_SHA }}
            GIT_REMOTE_URL=${{github.event.repository.html_url }}
            GIT_REMOTE_COMMIT_URL=${{github.event.head_commit.url }}
            CARGO_INCREMENTAL=${{ env.BUILDKIT_ENDPOINT != '' && '1' || '0' }}
            TARGET_CPU=${{ matrix.target_cpu }}
            RUST_PROFILE=release-max-perf
          platforms: ${{ matrix.platform }}
          labels: ${{ steps.prepare.outputs.metadata_labels }}
          annotations: ${{ steps.prepare.outputs.metadata_annotations }}
          cache-from: type=gha
          # cache-to: type=gha,mode=max
          sbom: true
          outputs: |
            ${{ env.BUILTIN_REGISTRY_ENABLED == 'true' && format('type=image,"name={0}",push-by-digest=true,name-canonical=true,push=true', env.IMAGE_PATH) || format('type=image,"name={0}",push=false', env.IMAGE_PATH) }}
            type=local,dest=/tmp/binaries
        env:
          DOCKER_METADATA_ANNOTATIONS_LEVELS: index
          SOURCE_DATE_EPOCH: ${{ env.TIMESTAMP }}
      - name: Upload max-perf Docker artifacts
        uses: ./.forgejo/actions/upload-docker-artifacts
        with:
          slug: ${{ matrix.slug }}
          cpu_suffix: ${{ steps.prepare.outputs.cpu_suffix }}
          artifact_suffix: "-maxperf"
          digest_suffix: "-maxperf"
          digest: ${{ steps.build.outputs.digest }}

      - name: Create manifest list and push
        working-directory: /tmp/digests
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
        run: |
          IFS=$'\n'
          IMAGES_LIST=($IMAGES)
          ANNOTATIONS_LIST=($DOCKER_METADATA_OUTPUT_ANNOTATIONS)
          TAGS_LIST=($DOCKER_METADATA_OUTPUT_TAGS)
          for REPO in "${IMAGES_LIST[@]}"; do
            docker buildx imagetools create \
              $(for tag in "${TAGS_LIST[@]}"; do echo "--tag"; echo "$tag"; done) \
              $(for annotation in "${ANNOTATIONS_LIST[@]}"; do echo "--annotation"; echo "$annotation"; done) \
              $(for reference in *; do printf "$REPO@sha256:%s\n" $reference; done)
          done
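As a sketch (not part of the diff; the tag names and digests below are hypothetical), one iteration of the loop above expands to a single invocation that stitches the per-platform digests into one multi-arch manifest list:

    docker buildx imagetools create \
      --tag forgejo.ellis.link/continuwuation/continuwuity:main \
      --tag forgejo.ellis.link/continuwuation/continuwuity:sha-0123456789abcdef \
      forgejo.ellis.link/continuwuation/continuwuity@sha256:aaaa... \
      forgejo.ellis.link/continuwuation/continuwuity@sha256:bbbb...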
      - name: Inspect image
        env:
          IMAGES: ${{needs.define-variables.outputs.images}}
        shell: bash
        run: |
          IMAGES_LIST=($IMAGES)
          for REPO in "${IMAGES_LIST[@]}"; do
            docker buildx imagetools inspect $REPO:${{ steps.meta.outputs.version }}
          done
  merge-maxperf:
    name: "Create Max-Perf Manifest"
    runs-on: dind
    needs: build-maxperf
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        with:
          persist-credentials: false
      - name: Create max-perf manifest
        uses: ./.forgejo/actions/create-docker-manifest
        with:
          digest_pattern: "digests-maxperf-linux-{amd64-haswell,arm64}"
          tag_suffix: "-maxperf"
          images: ${{ env.IMAGE_PATH }}
          registry_user: ${{ vars.BUILTIN_REGISTRY_USER || github.actor }}
          registry_password: ${{ secrets.BUILTIN_REGISTRY_PASSWORD || secrets.GITHUB_TOKEN }}
132  .forgejo/workflows/renovate.yml  (new file)
@@ -0,0 +1,132 @@
name: Maintenance / Renovate

enable-email-notifications: true

on:
  schedule:
    # Run at 5am UTC daily to avoid late-night dev
    - cron: '0 5 * * *'

  workflow_dispatch:
    inputs:
      dryRun:
        description: 'Dry run mode'
        required: false
        default: ''
        type: choice
        options:
          - ''
          - 'extract'
          - 'lookup'
          - 'full'
      logLevel:
        description: 'Log level'
        required: false
        default: 'info'
        type: choice
        options:
          - 'debug'
          - 'info'
          - 'warning'
          - 'critical'

  push:
    branches:
      - main
    paths:
      # Re-run when config changes
      - '.forgejo/workflows/renovate.yml'
      - 'renovate.json'

jobs:
  renovate:
    name: Renovate
    runs-on: ubuntu-latest
    container:
      image: ghcr.io/renovatebot/renovate:41.146.4@sha256:bb70194b7405faf10a6f279b60caa10403a440ba37d158c5a4ef0ae7b67a0f92
      options: --tmpfs /tmp:exec
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          show-progress: false

      - name: print node heap
        run: /usr/local/renovate/node -e 'console.log(`node heap limit = ${require("v8").getHeapStatistics().heap_size_limit / (1024 * 1024)} Mb`)'

      - name: Restore renovate repo cache
        uses: actions/cache/restore@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/repository
          key: renovate-repo-cache-${{ github.run_id }}
          restore-keys: |
            renovate-repo-cache-

      - name: Restore renovate package cache
        uses: actions/cache/restore@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/renovate-cache-sqlite
          key: renovate-package-cache-${{ github.run_id }}
          restore-keys: |
            renovate-package-cache-

      - name: Restore renovate OSV cache
        uses: actions/cache/restore@v4
        with:
          path: |
            /tmp/osv
          key: renovate-osv-cache-${{ github.run_id }}
          restore-keys: |
            renovate-osv-cache-

      - name: Self-hosted Renovate
        run: renovate
        env:
          LOG_LEVEL: ${{ inputs.logLevel || 'info' }}
          RENOVATE_DRY_RUN: ${{ inputs.dryRun || 'false' }}

          RENOVATE_PLATFORM: forgejo
          RENOVATE_ENDPOINT: ${{ github.server_url }}
          RENOVATE_AUTODISCOVER: 'false'
          RENOVATE_REPOSITORIES: '["${{ github.repository }}"]'

          RENOVATE_GIT_TIMEOUT: 60000

          RENOVATE_REQUIRE_CONFIG: 'required'
          RENOVATE_ONBOARDING: 'false'
          RENOVATE_INHERIT_CONFIG: 'true'

          RENOVATE_GITHUB_TOKEN_WARN: 'false'
          RENOVATE_TOKEN: ${{ secrets.RENOVATE_TOKEN }}
          GITHUB_COM_TOKEN: ${{ secrets.GH_PUBLIC_RO || secrets.GH_TOKEN }}

          RENOVATE_REPOSITORY_CACHE: 'enabled'
          RENOVATE_X_SQLITE_PACKAGE_CACHE: 'true'
          OSV_OFFLINE_ROOT_DIR: /tmp/osv

      - name: Save renovate repo cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/repository
          key: renovate-repo-cache-${{ github.run_id }}

      - name: Save renovate package cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: |
            /tmp/renovate/cache/renovate/renovate-cache-sqlite
          key: renovate-package-cache-${{ github.run_id }}

      - name: Save renovate OSV cache
        if: always()
        uses: actions/cache/save@v4
        with:
          path: |
            /tmp/osv
          key: renovate-osv-cache-${{ github.run_id }}
@@ -1,144 +0,0 @@
name: Checks / Rust

on:
  push:

jobs:
  format:
    name: Format
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Install rust
        uses: ./.forgejo/actions/rust-toolchain
        with:
          toolchain: "nightly"
          components: "rustfmt"

      - name: Check formatting
        run: |
          cargo +nightly fmt --all -- --check

  clippy:
    name: Clippy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Install rust
        uses: ./.forgejo/actions/rust-toolchain

      - uses: https://github.com/actions/create-github-app-token@v2
        id: app-token
        with:
          app-id: ${{ vars.GH_APP_ID }}
          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
          github-api-url: https://api.github.com
          owner: ${{ vars.GH_APP_OWNER }}
          repositories: ""
      - name: Install sccache
        uses: ./.forgejo/actions/sccache
        with:
          token: ${{ steps.app-token.outputs.token }}
      - run: sudo apt-get update
      - name: Install system dependencies
        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
        with:
          packages: clang liburing-dev
          version: 1
      - name: Cache Rust registry
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/git
            !~/.cargo/git/checkouts
            ~/.cargo/registry
            !~/.cargo/registry/src
          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
      - name: Timelord
        uses: ./.forgejo/actions/timelord
        with:
          key: sccache-v0
          path: .
      - name: Clippy
        run: |
          cargo clippy \
            --workspace \
            --all-features \
            --locked \
            --no-deps \
            --profile test \
            -- \
            -D warnings

      - name: Show sccache stats
        if: always()
        run: sccache --show-stats

  cargo-test:
    name: Cargo Test
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          persist-credentials: false

      - name: Install rust
        uses: ./.forgejo/actions/rust-toolchain

      - uses: https://github.com/actions/create-github-app-token@v2
        id: app-token
        with:
          app-id: ${{ vars.GH_APP_ID }}
          private-key: ${{ secrets.GH_APP_PRIVATE_KEY }}
          github-api-url: https://api.github.com
          owner: ${{ vars.GH_APP_OWNER }}
          repositories: ""
      - name: Install sccache
        uses: ./.forgejo/actions/sccache
        with:
          token: ${{ steps.app-token.outputs.token }}
      - run: sudo apt-get update
      - name: Install system dependencies
        uses: https://github.com/awalsh128/cache-apt-pkgs-action@v1
        with:
          packages: clang liburing-dev
          version: 1
      - name: Cache Rust registry
        uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/git
            !~/.cargo/git/checkouts
            ~/.cargo/registry
            !~/.cargo/registry/src
          key: rust-registry-${{hashFiles('**/Cargo.lock') }}
      - name: Timelord
        uses: ./.forgejo/actions/timelord
        with:
          key: sccache-v0
          path: .
      - name: Cargo Test
        run: |
          cargo test \
            --workspace \
            --all-features \
            --locked \
            --profile test \
            --all-targets \
            --no-fail-fast

      - name: Show sccache stats
        if: always()
        run: sccache --show-stats
108  .forgejo/workflows/update-flake-hashes.yml  (new file)
@@ -0,0 +1,108 @@
name: Update flake hashes

on:
  workflow_dispatch:
  pull_request:
    paths:
      - "Cargo.lock"
      - "Cargo.toml"
      - "rust-toolchain.toml"
      - ".forgejo/workflows/update-flake-hashes.yml"

jobs:
  update-flake-hashes:
    runs-on: ubuntu-latest
    steps:
      - uses: https://code.forgejo.org/actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5
        with:
          fetch-depth: 0
          fetch-tags: false
          fetch-single-branch: true
          submodules: false
          persist-credentials: false

      - uses: https://github.com/cachix/install-nix-action@7ab6e7fd29da88e74b1e314a4ae9ac6b5cda3801 # v31.8.0
        with:
          nix_path: nixpkgs=channel:nixos-unstable

      # We can skip getting a toolchain hash if this was run as a dispatch with the intent
      # to update just the rocksdb hash. If this was run as a dispatch and the toolchain
      # files are changed, we still update them, as well as the rocksdb import.
      - name: Detect changed files
        id: changes
        run: |
          git fetch origin ${{ github.base_ref }} --depth=1 || true
          if [ -n "${{ github.event.pull_request.base.sha }}" ]; then
            base=${{ github.event.pull_request.base.sha }}
          else
            base=$(git rev-parse HEAD~1)
          fi
          echo "Base: $base"
          echo "HEAD: $(git rev-parse HEAD)"
          git diff --name-only $base HEAD > changed_files.txt
          echo "files=$(cat changed_files.txt)" >> $FORGEJO_OUTPUT

      - name: Get new toolchain hash
        if: contains(steps.changes.outputs.files, 'Cargo.toml') || contains(steps.changes.outputs.files, 'Cargo.lock') || contains(steps.changes.outputs.files, 'rust-toolchain.toml')
        run: |
          # Set the current sha256 to an empty hash to make `nix build` calculate a new one
          awk '/fromToolchainFile *\{/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = pkgsHost.lib.fakeSha256;"); found=0} 1' flake.nix > temp.nix && mv temp.nix flake.nix

          # Build continuwuity and filter for the new hash
          # We do `|| true` because we want this to fail without stopping the workflow
          nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_toolchain_hash.txt) || true

          # Place the new hash in place of the empty hash
          new_hash=$(cat new_toolchain_hash.txt)
          sed -i "s|pkgsHost.lib.fakeSha256|\"$new_hash\"|" flake.nix

          echo "New hash:"
          awk -F'"' '/fromToolchainFile/{found=1; next} found && /sha256 =/{print $2; found=0}' flake.nix
          echo "Expected new hash:"
          cat new_toolchain_hash.txt

          rm new_toolchain_hash.txt
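Illustrative sketch (not part of the diff; the fragment assumes a flake.nix shaped like the one this workflow edits): the awk pass turns a pinned section such as

    fromToolchainFile {
      file = ./rust-toolchain.toml;
      sha256 = "sha256-oldToolchainHashValue=";
    };

into one whose sha256 line reads `sha256 = pkgsHost.lib.fakeSha256;`. The subsequent `nix build` then fails with a hash mismatch whose `got:` line carries the real hash, and the sed pass writes that value back in place of fakeSha256.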
      - name: Get new rocksdb hash
        run: |
          # Set the current sha256 to an empty hash to make `nix build` calculate a new one
          awk '/repo = "rocksdb";/{found=1; print; next} found && /sha256 =/{sub(/sha256 = .*/, "sha256 = pkgsHost.lib.fakeSha256;"); found=0} 1' flake.nix > temp.nix && mv temp.nix flake.nix

          # Build continuwuity and filter for the new hash
          # We do `|| true` because we want this to fail without stopping the workflow
          nix build .#default 2>&1 | tee >(grep 'got:' | awk '{print $2}' > new_rocksdb_hash.txt) || true

          # Place the new hash in place of the empty hash
          new_hash=$(cat new_rocksdb_hash.txt)
          sed -i "s|pkgsHost.lib.fakeSha256|\"$new_hash\"|" flake.nix

          echo "New hash:"
          awk -F'"' '/repo = "rocksdb";/{found=1; next} found && /sha256 =/{print $2; found=0}' flake.nix
          echo "Expected new hash:"
          cat new_rocksdb_hash.txt

          rm new_rocksdb_hash.txt

      - name: Show diff
        run: git diff flake.nix

      - name: Push changes
        run: |
          set -euo pipefail

          if git diff --quiet --exit-code; then
            echo "No changes to commit."
            exit 0
          fi

          git config user.email "renovate@mail.ellis.link"
          git config user.name "renovate"

          REF="${{ github.head_ref }}"

          git fetch origin "$REF"
          git checkout "$REF"

          git commit -a -m "chore(Nix): Updated flake hashes"

          git push origin HEAD:refs/heads/"$REF"
5  .github/FUNDING.yml  (vendored)
@@ -1,5 +1,4 @@
-github: [JadedBlueEyes]
-# Doesn't support an array, so we can only list nex
-ko_fi: nexy7574
+github: [JadedBlueEyes, nexy7574]
custom:
  - https://ko-fi.com/nexy7574
+  - https://ko-fi.com/JadedBlueEyes
1  .mailmap
@@ -13,3 +13,4 @@ Rudi Floren <rudi.floren@gmail.com> <rudi.floren@googlemail.com>
Tamara Schmitz <tamara.zoe.schmitz@posteo.de> <15906939+tamara-schmitz@users.noreply.github.com>
Timo Kösters <timo@koesters.xyz>
x4u <xi.zhu@protonmail.ch> <14617923-x4u@users.noreply.gitlab.com>
+Ginger <ginger@gingershaped.computer> <75683114+gingershaped@users.noreply.github.com>
@@ -9,7 +9,7 @@ repos:
  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v5.0.0
    hooks:
-     - id: check-byte-order-marker
+     - id: fix-byte-order-marker
      - id: check-case-conflict
      - id: check-symlinks
      - id: destroyed-symlinks
@@ -13,6 +13,9 @@ extend-ignore-re = [
  "[0-9+][A-Za-z0-9+]{30,}[a-z0-9+]",
  "\\$[A-Z0-9+][A-Za-z0-9+]{6,}[a-z0-9+]",
  "\\b[a-z0-9+/=][A-Za-z0-9+/=]{7,}[a-z0-9+/=][A-Z]\\b",
+
+  # In the renovate config
+  ".ontainer"
]

[default.extend-words]
3  .vscode/settings.json  (vendored)
@@ -7,5 +7,6 @@
    "continuwuity",
    "homeserver",
    "homeservers"
- ]
+ ],
+ "rust-analyzer.cargo.features": ["full"]
}
@@ -65,11 +65,11 @@ # Run tests
cargo test

# Check compilation
-cargo check --workspace --all-features
+cargo check --workspace --features full

# Run lints
-cargo clippy --workspace --all-features
-# Auto-fix: cargo clippy --workspace --all-features --fix --allow-staged;
+cargo clippy --workspace --features full
+# Auto-fix: cargo clippy --workspace --features full --fix --allow-staged;

# Format code (must use nightly)
cargo +nightly fmt
1783  Cargo.lock  (generated): file diff suppressed because it is too large
88  Cargo.toml
@@ -21,7 +21,7 @@ license = "Apache-2.0"
readme = "README.md"
repository = "https://forgejo.ellis.link/continuwuation/continuwuity"
rust-version = "1.86.0"
-version = "0.5.0-rc.6"
+version = "0.5.0-rc.8"

[workspace.metadata.crane]
name = "conduwuit"
@@ -45,18 +45,18 @@ version = "0.3"
features = ["ffi", "std", "union"]

[workspace.dependencies.const-str]
-version = "0.6.2"
+version = "0.7.0"

[workspace.dependencies.ctor]
-version = "0.2.9"
+version = "0.5.0"

[workspace.dependencies.cargo_toml]
-version = "0.21"
+version = "0.22"
default-features = false
features = ["features"]

[workspace.dependencies.toml]
-version = "0.8.14"
+version = "0.9.5"
default-features = false
features = ["parse"]

@@ -166,8 +166,8 @@ default-features = false
features = ["raw_value"]

# Used for appservice registration files
-[workspace.dependencies.serde_yaml]
-version = "0.9.34"
+[workspace.dependencies.serde_yml]
+version = "0.0.12"

# Used to load forbidden room/user regex from config
[workspace.dependencies.serde_regex]
@@ -351,8 +351,7 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
-#branch = "conduwuit-changes"
-rev = "b753738047d1f443aca870896ef27ecaacf027da"
+rev = "d18823471ab3c09e77ff03eea346d4c07e572654"
features = [
    "compat",
    "rand",
@@ -382,16 +381,18 @@ features = [
    "unstable-msc4095",
    "unstable-msc4121",
    "unstable-msc4125",
+   "unstable-msc4155",
    "unstable-msc4186",
    "unstable-msc4203", # sending to-device events to appservices
    "unstable-msc4210", # remove legacy mentions
    "unstable-extensible-events",
    "unstable-pdu",
-   "unstable-msc4155"
]

[workspace.dependencies.rust-rocksdb]
git = "https://forgejo.ellis.link/continuwuation/rust-rocksdb-zaidoon1"
-rev = "fc9a99ac54a54208f90fdcba33ae6ee8bc3531dd"
+rev = "61d9d23872197e9ace4a477f2617d5c9f50ecb23"
default-features = false
features = [
    "multi-threaded-cf",
@@ -411,25 +412,28 @@ default-features = false

# optional opentelemetry, performance measurements, flamegraphs, etc for performance measurements and monitoring
[workspace.dependencies.opentelemetry]
-version = "0.21.0"
+version = "0.30.0"

[workspace.dependencies.tracing-flame]
version = "0.2.0"

[workspace.dependencies.tracing-opentelemetry]
-version = "0.22.0"
+version = "0.31.0"

[workspace.dependencies.opentelemetry_sdk]
-version = "0.21.2"
+version = "0.30.0"
features = ["rt-tokio"]

-[workspace.dependencies.opentelemetry-jaeger]
-version = "0.20.0"
-features = ["rt-tokio"]
+[workspace.dependencies.opentelemetry-otlp]
+version = "0.30.0"
+features = ["http", "trace", "logs", "metrics"]
+
+[workspace.dependencies.opentelemetry-jaeger-propagator]
+version = "0.30.0"

# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
-version = "0.37.0"
+version = "0.42.0"
default-features = false
features = [
    "backtrace",
@@ -445,9 +449,9 @@ features = [
]

[workspace.dependencies.sentry-tracing]
-version = "0.37.0"
+version = "0.42.0"
[workspace.dependencies.sentry-tower]
-version = "0.37.0"
+version = "0.42.0"

# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -476,7 +480,7 @@ features = ["use_std"]
version = "0.4"

[workspace.dependencies.nix]
-version = "0.29.0"
+version = "0.30.1"
default-features = false
features = ["resource"]

@@ -498,7 +502,7 @@ version = "0.4.3"
default-features = false

[workspace.dependencies.termimad]
-version = "0.31.2"
+version = "0.34.0"
default-features = false

[workspace.dependencies.checked_ops]
@@ -536,16 +540,24 @@ version = "0.2"
version = "0.2"

[workspace.dependencies.minicbor]
-version = "0.26.3"
+version = "2.1.1"
features = ["std"]

[workspace.dependencies.minicbor-serde]
-version = "0.4.1"
+version = "0.6.0"
features = ["std"]

[workspace.dependencies.maplit]
version = "1.0.2"

+[workspace.dependencies.ldap3]
+version = "0.12.0"
+default-features = false
+features = ["sync", "tls-rustls", "rustls-provider"]
+
+[workspace.dependencies.resolv-conf]
+version = "0.7.5"
+
#
# Patches
#
@@ -590,12 +602,6 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da"
git = "https://forgejo.ellis.link/continuwuation/hyper-util"
rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941"

# Allows no-aaaa option in resolv.conf
# Use 1-indexed line numbers when displaying parse error messages
[patch.crates-io.resolv-conf]
git = "https://forgejo.ellis.link/continuwuation/resolv-conf"
rev = "56251316cc4127bcbf36e68ce5e2093f4d33e227"

#
# Our crates
#
@@ -759,25 +765,6 @@ incremental = true

[profile.dev.package.conduwuit_core]
inherits = "dev"
-#rustflags = [
-#    '--cfg', 'conduwuit_mods',
-#    '-Ztime-passes',
-#    '-Zmir-opt-level=0',
-#    '-Ztls-model=initial-exec',
-#    '-Cprefer-dynamic=true',
-#    '-Zstaticlib-prefer-dynamic=true',
-#    '-Zstaticlib-allow-rdylib-deps=true',
-#    '-Zpacked-bundled-libs=false',
-#    '-Zplt=true',
-#    '-Clink-arg=-Wl,--as-needed',
-#    '-Clink-arg=-Wl,--allow-shlib-undefined',
-#    '-Clink-arg=-Wl,-z,lazy',
-#    '-Clink-arg=-Wl,-z,unique',
-#    '-Clink-arg=-Wl,-z,nodlopen',
-#    '-Clink-arg=-Wl,-z,nodelete',
-#]
+[profile.dev.package.xtask-generate-commands]
+inherits = "dev"
[profile.dev.package.conduwuit]
inherits = "dev"
#rustflags = [
@@ -867,7 +854,7 @@ unused-qualifications = "warn"
#unused-results = "warn" # TODO

## some sadness
-elided_named_lifetimes = "allow" # TODO!
+mismatched_lifetime_syntaxes = "allow" # TODO!
let_underscore_drop = "allow"
missing_docs = "allow"
# cfgs cannot be limited to expected cfgs or their de facto non-transitive/opt-in use-case e.g.
@@ -1006,3 +993,6 @@ literal_string_with_formatting_args = { level = "allow", priority = 1 }


needless_raw_string_hashes = "allow"
+
+# TODO: Enable this lint & fix all instances
+collapsible_if = "allow"
@@ -57,7 +57,7 @@ ### What are the project's goals?

### Can I try it out?

-Check out the [documentation](introduction) for installation instructions.
+Check out the [documentation](https://continuwuity.org) for installation instructions.

There are currently no open registration Continuwuity instances available.
@@ -1,83 +0,0 @@
[Unit]

Description=Continuwuity - Matrix homeserver
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
RequiresMountsFor=/var/lib/private/conduwuit
Alias=matrix-conduwuit.service

[Service]
DynamicUser=yes
Type=notify-reload
ReloadSignal=SIGUSR1

TTYPath=/dev/tty25
DeviceAllow=char-tty
StandardInput=tty-force
StandardOutput=tty
StandardError=journal+console

Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"

TTYReset=yes
# uncomment to allow buffer to be cleared every restart
TTYVTDisallocate=no

TTYColumns=120
TTYRows=40

AmbientCapabilities=
CapabilityBoundingSet=

DevicePolicy=closed
LockPersonality=yes
MemoryDenyWriteExecute=yes
NoNewPrivileges=yes
#ProcSubset=pid
ProtectClock=yes
ProtectControlGroups=yes
ProtectHome=yes
ProtectHostname=yes
ProtectKernelLogs=yes
ProtectKernelModules=yes
ProtectKernelTunables=yes
ProtectProc=invisible
ProtectSystem=strict
PrivateDevices=yes
PrivateMounts=yes
PrivateTmp=yes
PrivateUsers=yes
PrivateIPC=yes
RemoveIPC=yes
RestrictAddressFamilies=AF_INET AF_INET6 AF_UNIX
RestrictNamespaces=yes
RestrictRealtime=yes
RestrictSUIDSGID=yes
SystemCallArchitectures=native
SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
StateDirectory=conduwuit

RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml
LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml
BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit
BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit

ExecStart=/usr/bin/conduwuit
Restart=on-failure
RestartSec=5

TimeoutStopSec=4m
TimeoutStartSec=4m

StartLimitInterval=1m
StartLimitBurst=5

[Install]
WantedBy=multi-user.target
@@ -79,9 +79,11 @@
# This is the only directory where continuwuity will save its data,
# including media. Note: this was previously "/var/lib/matrix-conduit".
#
-# YOU NEED TO EDIT THIS.
+# YOU NEED TO EDIT THIS, UNLESS you are running continuwuity as a
+# `systemd` service. The service file sets it to `/var/lib/conduwuit`
+# using an environment variable and also grants write access.
#
-# example: "/var/lib/continuwuity"
+# example: "/var/lib/conduwuit"
#
#database_path =
@@ -589,13 +591,19 @@
#
#default_room_version = 11

-# This item is undocumented. Please contribute documentation for it.
+# Enable OpenTelemetry OTLP tracing export. This replaces the deprecated
+# Jaeger exporter. Traces will be sent via OTLP to a collector (such as
+# Jaeger) that supports the OpenTelemetry Protocol.
#
-#allow_jaeger = false
+# Configure your OTLP endpoint using the OTEL_EXPORTER_OTLP_ENDPOINT
+# environment variable (defaults to http://localhost:4318).
+#
+#allow_otlp = false

-# This item is undocumented. Please contribute documentation for it.
+# Filter for OTLP tracing spans. This controls which spans are exported
+# to the OTLP collector.
#
-#jaeger_filter = "info"
+#otlp_filter = "info"

# If the 'perf_measurements' compile-time feature is enabled, enables
# collecting folded stack trace profile of tracing spans using
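Usage sketch (values illustrative, grounded in the new comments above): enabling the exporter amounts to uncommenting and setting

    allow_otlp = true
    otlp_filter = "info"

in the config, and pointing the collector endpoint through the environment, for example OTEL_EXPORTER_OTLP_ENDPOINT=http://jaeger:4318.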
@@ -1696,6 +1704,10 @@
|
||||
#
|
||||
#config_reload_signal = true
|
||||
|
||||
# This item is undocumented. Please contribute documentation for it.
|
||||
#
|
||||
#ldap = false
|
||||
|
||||
[global.tls]
|
||||
|
||||
# Path to a valid TLS certificate file.
|
||||
@@ -1774,3 +1786,91 @@

# is 33.55MB. Setting it to 0 disables blurhashing.
#
#blurhash_max_raw_size = 33554432

[global.ldap]

# Whether to enable LDAP login.
#
# example: "true"
#
#enable = false

# Whether to force LDAP authentication or authorize classical password
# login.
#
# example: "true"
#
#ldap_only = false

# URI of the LDAP server.
#
# example: "ldap://ldap.example.com:389"
#
#uri = ""

# Root of the searches.
#
# example: "ou=users,dc=example,dc=org"
#
#base_dn = ""

# Bind DN if anonymous search is not enabled.
#
# You can use the variable `{username}` that will be replaced by the
# entered username. In such case, the password used to bind will be the
# one provided for the login and not the one given by
# `bind_password_file`. Beware: automatically granting admin rights will
# not work if you use this direct bind instead of a LDAP search.
#
# example: "cn=ldap-reader,dc=example,dc=org" or
# "cn={username},ou=users,dc=example,dc=org"
#
#bind_dn = ""

# Path to a file on the system that contains the password for the
# `bind_dn`.
#
# The server must be able to access the file, and it must not be empty.
#
#bind_password_file = ""

# Search filter to limit user searches.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(&(objectClass=person)(memberOf=matrix))"
#
#filter = "(objectClass=*)"

# Attribute to use to uniquely identify the user.
#
# example: "uid" or "cn"
#
#uid_attribute = "uid"

# Attribute containing the display name of the user.
#
# example: "givenName" or "sn"
#
#name_attribute = "givenName"

# Root of the searches for admin users.
#
# Defaults to `base_dn` if empty.
#
# example: "ou=admins,dc=example,dc=org"
#
#admin_base_dn = ""

# The LDAP search filter to find administrative users for continuwuity.
#
# If left blank, administrative state must be configured manually for each
# user.
#
# You can use the variable `{username}` that will be replaced by the
# entered username for more complex filters.
#
# example: "(objectClass=conduwuitAdmin)" or "(uid={username})"
#
#admin_filter = ""
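Before wiring these values into continuwuity, the base DN, bind DN, and filter can be sanity-checked with a plain `ldapsearch` query. This is a sketch reusing the placeholder values from the examples above; `-W` prompts for the bind password:

```bash
ldapsearch -H ldap://ldap.example.com:389 \
  -D "cn=ldap-reader,dc=example,dc=org" -W \
  -b "ou=users,dc=example,dc=org" \
  "(&(objectClass=person)(memberOf=matrix))" uid givenName
```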

44 debian/postinst vendored
@@ -1,44 +0,0 @@

#!/bin/sh
set -e

# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule

CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit

case "$1" in
configure)
# Create the `conduwuit` user if it does not exist yet.
if ! getent passwd conduwuit > /dev/null ; then
echo 'Adding system user for the conduwuit Matrix homeserver' 1>&2
adduser --system --group --quiet \
--home "$CONDUWUIT_DATABASE_PATH" \
--disabled-login \
--shell "/usr/sbin/nologin" \
conduwuit
fi

# Create the database path if it does not exist yet and fix up ownership
# and permissions for the config.
mkdir -v -p "$CONDUWUIT_DATABASE_PATH"

# symlink the previous location for compatibility if it does not exist yet.
if ! test -L "/var/lib/matrix-conduit" ; then
ln -s -v "$CONDUWUIT_DATABASE_PATH" "/var/lib/matrix-conduit"
fi

chown -v conduwuit:conduwuit -R "$CONDUWUIT_DATABASE_PATH"
chown -v conduwuit:conduwuit -R "$CONDUWUIT_CONFIG_PATH"

chmod -v 740 "$CONDUWUIT_DATABASE_PATH"

echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''

;;
esac

#DEBHELPER#
@@ -48,11 +48,13 @@ EOF

# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.13.0
ENV BINSTALL_VERSION=1.15.7
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7
# renovate: datasource=crate depName=timelord-cli
ENV TIMELORD_VERSION=3.0.1

# Install unpackaged tools
RUN <<EOF

@@ -60,6 +62,7 @@ RUN <<EOF

curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
cargo binstall --no-confirm timelord-cli --version $TIMELORD_VERSION
EOF

# Set up xx (cross-compilation scripts)

@@ -78,17 +81,20 @@ RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \

WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& rustup target add $(xx-cargo --print-target-triple)
&& xx-cargo --setup-target-triple

# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment
# Configure incremental compilation based on build context
ARG CARGO_INCREMENTAL=0
RUN echo "CARGO_INCREMENTAL=${CARGO_INCREMENTAL}" >> /etc/environment

# Configure pkg-config
RUN <<EOF
set -o xtrace
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF

@@ -109,16 +115,17 @@ RUN <<EOF

EOF

# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU=
ARG TARGET_CPU

RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF

# Prepare output directories

@@ -130,18 +137,23 @@ FROM toolchain AS builder

# Get source
COPY . .

# Restore timestamps from timelord cache if available
RUN --mount=type=cache,target=/timelord/ \
echo "Restoring timestamps from timelord cache"; \
timelord sync --source-dir /app --cache-dir /timelord;

ARG TARGETPLATFORM

# Verify environment configuration
RUN xx-cargo --print-target-triple

# Conduwuit version info
ARG GIT_COMMIT_HASH=
ARG GIT_COMMIT_HASH_SHORT=
ARG GIT_REMOTE_URL=
ARG GIT_REMOTE_COMMIT_URL=
ARG CONDUWUIT_VERSION_EXTRA=
ARG CONTINUWUITY_VERSION_EXTRA=
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
@@ -154,7 +166,7 @@ ARG RUST_PROFILE=release

# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace

@@ -169,8 +181,8 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \

jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/${RUST_PROFILE}/$BINARY /out/sbin/$BINARY
done
EOF

@@ -196,32 +208,57 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \

EOF

# Extract dynamically linked dependencies
RUN <<EOF
RUN <<'DEPS_EOF'
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
mkdir /out/libs /out/libs-root

# Process each binary
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
if lddtree_output=$(lddtree "$BINARY" 2>/dev/null) && [ -n "$lddtree_output" ]; then
echo "$lddtree_output" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | \
awk '{dest = ($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2; print "install -D " $1 " " dest}' | \
while read cmd; do eval "$cmd"; done
fi
done
EOF

# Show what will be copied to runtime
echo "=== Libraries being copied to runtime image:"
find /out/libs* -type f 2>/dev/null | sort || echo "No libraries found"
DEPS_EOF
FROM ubuntu:latest AS prepper

# Create layer structure
RUN mkdir -p /layer1/etc/ssl/certs \
/layer2/usr/lib \
/layer3/sbin /layer3/sbom

# Copy SSL certs and root-path libraries to layer1 (ultra-stable)
COPY --from=base /etc/ssl/certs /layer1/etc/ssl/certs
COPY --from=builder /out/libs-root/ /layer1/

# Copy application libraries to layer2 (semi-stable)
COPY --from=builder /out/libs/ /layer2/usr/lib/

# Copy binaries and SBOM to layer3 (volatile)
COPY --from=builder /out/sbin/ /layer3/sbin/
COPY --from=builder /out/sbom/ /layer3/sbom/

# Fix permissions after copying
RUN chmod -R 755 /layer1 /layer2 /layer3

FROM scratch

WORKDIR /

# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs
# Copy ultra-stable layer (SSL certs, system libraries)
COPY --from=prepper /layer1/ /

# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/
# Copy semi-stable layer (application libraries)
COPY --from=prepper /layer2/ /

# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/
# Copy volatile layer (binaries, SBOM)
COPY --from=prepper /layer3/ /

# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib
200 docker/musl.Dockerfile Normal file
@@ -0,0 +1,200 @@

# Why does this exist?
# Debian doesn't provide prebuilt musl packages
# rocksdb requires a prebuilt liburing, and linking fails if a gnu one is provided

ARG RUST_VERSION=1
ARG ALPINE_VERSION=3.22

FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS base
FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS toolchain

# Install repo tools and dependencies
RUN --mount=type=cache,target=/etc/apk/cache apk add \
build-base pkgconfig make jq bash \
curl git file \
llvm-dev clang clang-static lld

# Developer tool versions
# renovate: datasource=github-releases depName=cargo-bins/cargo-binstall
ENV BINSTALL_VERSION=1.15.7
# renovate: datasource=github-releases depName=psastras/sbom-rs
ENV CARGO_SBOM_VERSION=0.9.1
# renovate: datasource=crate depName=lddtree
ENV LDDTREE_VERSION=0.3.7

# Install unpackaged tools
RUN <<EOF
set -o xtrace
curl --retry 5 -L --proto '=https' --tlsv1.2 -sSf https://raw.githubusercontent.com/cargo-bins/cargo-binstall/main/install-from-binstall-release.sh | bash
cargo binstall --no-confirm cargo-sbom --version $CARGO_SBOM_VERSION
cargo binstall --no-confirm lddtree --version $LDDTREE_VERSION
EOF

# Set up xx (cross-compilation scripts)
COPY --from=xx / /
ARG TARGETPLATFORM

# Install libraries linked by the binary
RUN --mount=type=cache,target=/etc/apk/cache xx-apk add musl-dev gcc g++ liburing-dev

# Set up Rust toolchain
WORKDIR /app
COPY ./rust-toolchain.toml .
RUN rustc --version \
&& xx-cargo --setup-target-triple

# Build binary
# We disable incremental compilation to save disk space, as it only produces a minimal speedup for this case.
RUN echo "CARGO_INCREMENTAL=0" >> /etc/environment

# Configure pkg-config
RUN <<EOF
set -o xtrace
if command -v "$(xx-info)-pkg-config" >/dev/null 2>/dev/null; then
echo "PKG_CONFIG_LIBDIR=/usr/lib/$(xx-info)/pkgconfig" >> /etc/environment
echo "PKG_CONFIG=/usr/bin/$(xx-info)-pkg-config" >> /etc/environment
fi
echo "PKG_CONFIG_ALLOW_CROSS=true" >> /etc/environment
EOF

# Configure cc to use clang version
RUN <<EOF
set -o xtrace
echo "CC=clang" >> /etc/environment
echo "CXX=clang++" >> /etc/environment
EOF

# Cross-language LTO
RUN <<EOF
set -o xtrace
echo "CFLAGS=-flto" >> /etc/environment
echo "CXXFLAGS=-flto" >> /etc/environment
# Linker is set to target-compatible clang by xx
echo "RUSTFLAGS='-Clinker-plugin-lto -Clink-arg=-fuse-ld=lld'" >> /etc/environment
EOF

# Apply CPU-specific optimizations if TARGET_CPU is provided
ARG TARGET_CPU

RUN <<EOF
set -o allexport
set -o xtrace
. /etc/environment
if [ -n "${TARGET_CPU}" ]; then
echo "CFLAGS='${CFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "CXXFLAGS='${CXXFLAGS} -march=${TARGET_CPU}'" >> /etc/environment
echo "RUSTFLAGS='${RUSTFLAGS} -C target-cpu=${TARGET_CPU}'" >> /etc/environment
fi
EOF

# Prepare output directories
RUN mkdir /out

FROM toolchain AS builder

# Get source
COPY . .

ARG TARGETPLATFORM

# Verify environment configuration
RUN xx-cargo --print-target-triple

# Conduwuit version info
ARG GIT_COMMIT_HASH
ARG GIT_COMMIT_HASH_SHORT
ARG GIT_REMOTE_URL
ARG GIT_REMOTE_COMMIT_URL
ARG CONDUWUIT_VERSION_EXTRA
ARG CONTINUWUITY_VERSION_EXTRA
ENV GIT_COMMIT_HASH=$GIT_COMMIT_HASH
ENV GIT_COMMIT_HASH_SHORT=$GIT_COMMIT_HASH_SHORT
ENV GIT_REMOTE_URL=$GIT_REMOTE_URL
ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL
ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA
ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA

ARG RUST_PROFILE=release

# Build the binary
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
--mount=type=cache,target=/app/target,id=continuwuity-cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-musl-${RUST_PROFILE} \
bash <<'EOF'
set -o allexport
set -o xtrace
. /etc/environment
TARGET_DIR=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".target_directory"))
mkdir /out/sbin
PACKAGE=conduwuit
xx-cargo build --locked --profile ${RUST_PROFILE} \
-p $PACKAGE --no-default-features --features bindgen-static,release_max_log_level,standard;
BINARIES=($(cargo metadata --no-deps --format-version 1 | \
jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. == \"bin\") | any ) | .name"))
for BINARY in "${BINARIES[@]}"; do
echo $BINARY
xx-verify $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY
cp $TARGET_DIR/$(xx-cargo --print-target-triple)/release/$BINARY /out/sbin/$BINARY
done
EOF

# Generate Software Bill of Materials (SBOM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git/db \
bash <<'EOF'
set -o xtrace
mkdir /out/sbom
typeset -A PACKAGES
for BINARY in /out/sbin/*; do
BINARY_BASE=$(basename ${BINARY})
package=$(cargo metadata --no-deps --format-version 1 | jq -r ".packages[] | select(.targets[] | select( .kind | map(. == \"bin\") | any ) | .name == \"$BINARY_BASE\") | .name")
if [ -z "$package" ]; then
continue
fi
PACKAGES[$package]=1
done
for PACKAGE in $(echo ${!PACKAGES[@]}); do
echo $PACKAGE
cargo sbom --cargo-package $PACKAGE > /out/sbom/$PACKAGE.spdx.json
done
EOF

# Extract dynamically linked dependencies
RUN <<EOF
set -o xtrace
mkdir /out/libs
mkdir /out/libs-root
for BINARY in /out/sbin/*; do
lddtree "$BINARY" | awk '{print $(NF-0) " " $1}' | sort -u -k 1,1 | awk '{print "install", "-D", $1, (($2 ~ /^\//) ? "/out/libs-root" $2 : "/out/libs/" $2)}' | xargs -I {} sh -c {}
done
EOF

FROM scratch

WORKDIR /

# Copy root certs for tls into image
# You can also mount the certs from the host
# --volume /etc/ssl/certs:/etc/ssl/certs:ro
COPY --from=base /etc/ssl/certs /etc/ssl/certs

# Copy our build
COPY --from=builder /out/sbin/ /sbin/
# Copy SBOM
COPY --from=builder /out/sbom/ /sbom/

# Copy dynamic libraries to root
COPY --from=builder /out/libs-root/ /
COPY --from=builder /out/libs/ /usr/lib/

# Inform linker where to find libraries
ENV LD_LIBRARY_PATH=/usr/lib

# Continuwuity default port
EXPOSE 8008

CMD ["/sbin/conduwuit"]
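A sketch of building this image from the repository root; the tag name is arbitrary, and the build args shown are the optional ones declared above (omit them to accept the defaults):

```bash
# RUST_PROFILE defaults to "release"; TARGET_CPU may also be passed.
docker build \
  --file docker/musl.Dockerfile \
  --build-arg RUST_PROFILE=release \
  --tag continuwuity:musl-local \
  .
```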

@@ -10,6 +10,7 @@ # Summary

- [Kubernetes](deploying/kubernetes.md)
- [Arch Linux](deploying/arch-linux.md)
- [Debian](deploying/debian.md)
- [Fedora](deploying/fedora.md)
- [FreeBSD](deploying/freebsd.md)
- [TURN](turn.md)
- [Appservices](appservices.md)
@@ -21,6 +21,7 @@ # Command-Line Help for `admin`

* [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms)
* [`admin users force-join-room`↴](#admin-users-force-join-room)
* [`admin users force-leave-room`↴](#admin-users-force-leave-room)
* [`admin users force-leave-remote-room`↴](#admin-users-force-leave-remote-room)
* [`admin users force-demote`↴](#admin-users-force-demote)
* [`admin users make-user-admin`↴](#admin-users-make-user-admin)
* [`admin users put-room-tag`↴](#admin-users-put-room-tag)

@@ -295,6 +296,7 @@ ###### **Subcommands:**

* `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in
* `force-join-room` — - Manually join a local user to a room
* `force-leave-room` — - Manually leave a local user from a room
* `force-leave-remote-room` — - Manually leave a remote room for a local user
* `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
* `make-user-admin` — - Grant server-admin privileges to a user
* `put-room-tag` — - Puts a room tag for the specified user and room ID

@@ -449,6 +451,19 @@ ###### **Arguments:**

## `admin users force-leave-remote-room`

- Manually leave a remote room for a local user

**Usage:** `admin users force-leave-remote-room <USER_ID> <ROOM_ID>`

###### **Arguments:**

* `<USER_ID>`
* `<ROOM_ID>`

## `admin users force-demote`

- Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits
@@ -9,24 +9,11 @@ ## Example configuration

</details>

## Debian systemd unit file
## systemd unit file

<details>
<summary>Debian systemd unit file</summary>
<summary>systemd unit file</summary>

```
{{#include ../../debian/conduwuit.service}}
{{#include ../../pkg/conduwuit.service}}
```

</details>

## Arch Linux systemd unit file

<details>
<summary>Arch Linux systemd unit file</summary>

```
{{#include ../../arch/conduwuit.service}}
```

</details>

@@ -1 +1 @@

{{#include ../../debian/README.md}}
{{#include ../../pkg/debian/README.md}}
@@ -12,6 +12,15 @@ services:

#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure" # your HTTPS entry point
- "traefik.http.routers.continuwuity.tls=true"
- "traefik.http.routers.continuwuity.service=continuwuity"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# possibly, depending on your config:
# - "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_DATABASE_PATH: /var/lib/continuwuity
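Once this stack is up, the routing rule above can be spot-checked from outside; substitute your real domain for the `example.com` placeholder (whether the endpoints answer also depends on your well-known configuration):

```bash
curl -s https://example.com/.well-known/matrix/client
curl -s https://example.com/.well-known/matrix/server
```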

@@ -12,6 +12,14 @@ services:

#- ./continuwuity.toml:/etc/continuwuity.toml
networks:
- proxy
labels:
- "traefik.enable=true"
- "traefik.http.routers.continuwuity.rule=(Host(`matrix.example.com`) || (Host(`example.com`) && PathPrefix(`/.well-known/matrix`)))"
- "traefik.http.routers.continuwuity.entrypoints=websecure"
- "traefik.http.routers.continuwuity.tls.certresolver=letsencrypt"
- "traefik.http.services.continuwuity.loadbalancer.server.port=6167"
# Uncomment and adjust the following if you want to use middleware
# - "traefik.http.routers.continuwuity.middlewares=secureHeaders@file"
environment:
CONTINUWUITY_SERVER_NAME: your.server.name.example # EDIT THIS
CONTINUWUITY_TRUSTED_SERVERS: '["matrix.org"]'
201 docs/deploying/fedora.md Normal file
@@ -0,0 +1,201 @@

# RPM Installation Guide

Continuwuity is available as RPM packages for Fedora, RHEL, and compatible distributions.

The RPM packaging files are maintained in the `fedora/` directory:
- `continuwuity.spec.rpkg` - RPM spec file using rpkg macros for building from git
- `continuwuity.service` - Systemd service file for the server
- `RPM-GPG-KEY-continuwuity.asc` - GPG public key for verifying signed packages

RPM packages built by CI are signed with our GPG key (Ed25519, ID: `5E0FF73F411AAFCA`).

```bash
# Import the signing key
sudo rpm --import https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc

# Verify a downloaded package
rpm --checksig continuwuity-*.rpm
```

## Installation methods

**Stable releases** (recommended)

```bash
# Add the repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuation.repo
sudo dnf install continuwuity
```

**Development builds** from main branch

```bash
# Add the dev repository and install
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuation.repo
sudo dnf install continuwuity
```

**Feature branch builds** (example: `tom/new-feature`)

```bash
# Branch names are sanitized (slashes become hyphens, lowercase only)
sudo dnf config-manager addrepo --from-repofile=https://forgejo.ellis.link/api/packages/continuwuation/rpm/tom-new-feature/continuwuation.repo
sudo dnf install continuwuity
```

**Direct installation** without adding repository

```bash
# Latest stable release
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable/continuwuity

# Latest development build
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/dev/continuwuity

# Specific feature branch
sudo dnf install https://forgejo.ellis.link/api/packages/continuwuation/rpm/branch-name/continuwuity
```

**Manual repository configuration** (alternative method)

```bash
cat << 'EOF' | sudo tee /etc/yum.repos.d/continuwuity.repo
[continuwuity]
name=Continuwuity - Matrix homeserver
baseurl=https://forgejo.ellis.link/api/packages/continuwuation/rpm/stable
enabled=1
gpgcheck=1
gpgkey=https://forgejo.ellis.link/continuwuation/continuwuity/raw/branch/main/fedora/RPM-GPG-KEY-continuwuity.asc
EOF

sudo dnf install continuwuity
```

## Package management

**Automatic updates** with DNF Automatic

```bash
# Install and configure
sudo dnf install dnf-automatic
sudo nano /etc/dnf/automatic.conf # Set: apply_updates = yes
sudo systemctl enable --now dnf-automatic.timer
```

**Manual updates**

```bash
# Check for updates
sudo dnf check-update continuwuity

# Update to latest version
sudo dnf update continuwuity
```

**Switching channels** (stable/dev/feature branches)

```bash
# List enabled repositories
dnf repolist | grep continuwuation

# Disable current repository
sudo dnf config-manager --set-disabled continuwuation-stable # or -dev, or branch name

# Enable desired repository
sudo dnf config-manager --set-enabled continuwuation-dev # or -stable, or branch name

# Update to the new channel's version
sudo dnf update continuwuity
```

**Verifying installation**

```bash
# Check installed version
rpm -q continuwuity

# View package information
rpm -qi continuwuity

# List installed files
rpm -ql continuwuity

# Verify package integrity
rpm -V continuwuity
```

## Service management and removal

**Systemd service commands**

```bash
# Start the service
sudo systemctl start conduwuit

# Enable on boot
sudo systemctl enable conduwuit

# Check status
sudo systemctl status conduwuit

# View logs
sudo journalctl -u conduwuit -f
```

**Uninstallation**

```bash
# Stop and disable the service
sudo systemctl stop conduwuit
sudo systemctl disable conduwuit

# Remove the package
sudo dnf remove continuwuity

# Remove the repository (optional)
sudo rm /etc/yum.repos.d/continuwuation-*.repo
```

## Troubleshooting

**GPG key errors**: Temporarily disable GPG checking

```bash
sudo dnf --nogpgcheck install continuwuity
```

**Repository metadata issues**: Clear and rebuild cache

```bash
sudo dnf clean all
sudo dnf makecache
```

**Finding specific versions**

```bash
# List all available versions
dnf --showduplicates list continuwuity

# Install a specific version
sudo dnf install continuwuity-<version>
```

## Building locally

Build the RPM locally using rpkg:

```bash
# Install dependencies
sudo dnf install rpkg rpm-build cargo-rpm-macros systemd-rpm-macros

# Clone the repository
git clone https://forgejo.ellis.link/continuwuation/continuwuity.git
cd continuwuity

# Build SRPM
rpkg srpm

# Build RPM
rpmbuild --rebuild *.src.rpm
```
@@ -44,7 +44,7 @@ ### Building with the Rust toolchain

- (On linux) `pkg-config` on the compiling machine to allow finding `liburing`
- A C++ compiler and (on linux) `libclang` for RocksDB

You can build Continuwuity using `cargo build --release --all-features`.
You can build Continuwuity using `cargo build --release`.

### Building with Nix

4 docs/static/announcements.json vendored
@@ -6,8 +6,8 @@

"message": "Welcome to Continuwuity! Important announcements about the project will appear here."
},
{
"id": 2,
"message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions."
"id": 3,
"message": "_taps microphone_ The Continuwuity 0.5.0-rc.7 release is now available, and it's better than ever! **177 commits**, **35 pull requests**, **11 contributors,** and a lot of new stuff!\n\nFor highlights, we've got:\n\n* 🕵️ Full Policy Server support to fight spam!\n* 🚀 Smarter room & space upgrades.\n* 🚫 User suspension tools for better moderation.\n* 🤖 reCaptcha support for safer open registration.\n* 🔍 Ability to disable read receipts & typing indicators.\n* ⚡ Sweeping performance improvements!\n\nGet the [full changelog and downloads on our Forgejo](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.7) - and make sure you're in the [Announcements room](https://matrix.to/#/!releases:continuwuity.org/$hN9z6L2_dTAlPxFLAoXVfo_g8DyYXu4cpvWsSrWhmB0) to get stuff like this sooner."
}
]
}

@@ -83,7 +83,7 @@ env DIRENV_DEVSHELL=all-features \

--workspace \
--locked \
--profile test \
--all-features \
--features full \
--no-deps \
--document-private-items \
--color always

@@ -96,7 +96,7 @@ script = """

direnv exec . \
cargo clippy \
--workspace \
--all-features \
--features full \
--locked \
--profile test \
--color=always \

@@ -114,7 +114,7 @@ env DIRENV_DEVSHELL=all-features \

--workspace \
--locked \
--profile test \
--all-features \
--features full \
--color=always \
-- \
-D warnings
226 flake.lock generated
@@ -10,11 +10,11 @@

"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1751403276,
"narHash": "sha256-V0EPQNsQko1a8OqIWc2lLviLnMpR1m08Ej00z5RVTfs=",
"lastModified": 1758711588,
"narHash": "sha256-0nZlCCDC5PfndsQJXXtcyrtrfW49I3KadGMDlutzaGU=",
"owner": "zhaofengli",
"repo": "attic",
"rev": "896ad88fa57ad5dbcd267c0ac51f1b71ccfcb4dd",
"rev": "12cbeca141f46e1ade76728bce8adc447f2166c6",
"type": "github"
},
"original": {

@@ -29,14 +29,14 @@

"devenv": "devenv",
"flake-compat": "flake-compat_2",
"git-hooks": "git-hooks",
"nixpkgs": "nixpkgs_4"
"nixpkgs": "nixpkgs_2"
},
"locked": {
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"lastModified": 1756385612,
"narHash": "sha256-+NU5MMhuPHHRyvZZWNFG7zt+leRSPsJu1MwhOUzkPUk=",
"owner": "cachix",
"repo": "cachix",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"rev": "dc24688cd67518c3711d511fa369c0f5a131063a",
"type": "github"
},
"original": {

@@ -58,16 +58,21 @@

],
"git-hooks": [
"cachix",
"devenv"
"devenv",
"git-hooks"
],
"nixpkgs": "nixpkgs_2"
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1744206633,
"narHash": "sha256-pb5aYkE8FOoa4n123slgHiOf1UbNSnKe5pEZC+xXD5g=",
"lastModified": 1748883665,
"narHash": "sha256-R0W7uAg+BLoHjMRMQ8+oiSbTq8nkGz5RDpQ+ZfxxP3A=",
"owner": "cachix",
"repo": "cachix",
"rev": "8a60090640b96f9df95d1ab99e5763a586be1404",
"rev": "f707778d902af4d62d8dd92c269f8e70de09acbe",
"type": "github"
},
"original": {

@@ -78,18 +83,12 @@

}
},
"crane": {
"inputs": {
"nixpkgs": [
"attic",
"nixpkgs"
]
},
"locked": {
"lastModified": 1722960479,
"narHash": "sha256-NhCkJJQhD5GUib8zN9JrmYGMwt4lCRp6ZVNzIiYCl0Y=",
"lastModified": 1751562746,
"narHash": "sha256-smpugNIkmDeicNz301Ll1bD7nFOty97T79m4GUMUczA=",
"owner": "ipetkov",
"repo": "crane",
"rev": "4c6c77920b8d44cd6660c1621dea6b3fc4b4c4f4",
"rev": "aed2020fd3dc26e1e857d4107a5a67a33ab6c1fd",
"type": "github"
},
"original": {

@@ -100,11 +99,11 @@

},
"crane_2": {
"locked": {
"lastModified": 1750266157,
"narHash": "sha256-tL42YoNg9y30u7zAqtoGDNdTyXTi8EALDeCB13FtbQA=",
"lastModified": 1759893430,
"narHash": "sha256-yAy4otLYm9iZ+NtQwTMEbqHwswSFUbhn7x826RR6djw=",
"owner": "ipetkov",
"repo": "crane",
"rev": "e37c943371b73ed87faf33f7583860f81f1d5a48",
"rev": "1979a2524cb8c801520bd94c38bb3d5692419d93",
"type": "github"
},
"original": {

@@ -132,11 +131,11 @@

]
},
"locked": {
"lastModified": 1748273445,
"narHash": "sha256-5V0dzpNgQM0CHDsMzh+ludYeu1S+Y+IMjbaskSSdFh0=",
"lastModified": 1754404745,
"narHash": "sha256-BdbW/iTImczgcuATgQIa9sPGuYIBxVq2xqcvICsa2AQ=",
"owner": "cachix",
"repo": "devenv",
"rev": "668a50d8b7bdb19a0131f53c9f6c25c9071e1ffb",
"rev": "6563b21105168f90394dfaf58284b078af2d7275",
"type": "github"
},
"original": {

@@ -153,11 +152,11 @@

"rust-analyzer-src": "rust-analyzer-src"
},
"locked": {
"lastModified": 1751525020,
"narHash": "sha256-oDO6lCYS5Bf4jUITChj9XV7k3TP38DE0Ckz5n5ORCME=",
"lastModified": 1760337631,
"narHash": "sha256-3nvEN2lEpWtM1x7nfuiwpYHLNDgEUiWeBbyvy4vtVw8=",
"owner": "nix-community",
"repo": "fenix",
"rev": "a1a5f92f47787e7df9f30e5e5ac13e679215aa1e",
"rev": "fee7cf67cbd80a74460563388ac358b394014238",
"type": "github"
},
"original": {

@@ -170,11 +169,11 @@

"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"lastModified": 1747046372,
"narHash": "sha256-CIVLLkVgvHYbgI2UpXvIIBJ12HWgX+fjA8Xf8PUmqCY=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"rev": "9100a0f413b0c601e0533d1d94ffd501ce2e7885",
"type": "github"
},
"original": {

@@ -224,11 +223,11 @@

]
},
"locked": {
"lastModified": 1722555600,
"narHash": "sha256-XOQkdLafnb/p9ij77byFQjDf5m5QYl9b2REiVClC+x4=",
"lastModified": 1751413152,
"narHash": "sha256-Tyw1RjYEsp5scoigs1384gIg6e0GoBVjms4aXFfRssQ=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "8471fe90ad337a8074e957b69ca4d0089218391d",
"rev": "77826244401ea9de6e3bac47c2db46005e1f30b5",
"type": "github"
},
"original": {

@@ -247,11 +246,11 @@

]
},
"locked": {
"lastModified": 1712014858,
"narHash": "sha256-sB4SWl2lX95bExY2gMFG5HIzvva5AVMJd4Igm+GpZNw=",
"lastModified": 1733312601,
"narHash": "sha256-4pDvzqnegAfRkPwO3wmwBhVi/Sye1mzps0zHWYnP88c=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "9126214d0a59633752a136528f5f3b9aa8565b7d",
"rev": "205b12d8b7cd4802fbcb8e8ef6a0f1408781a4f9",
"type": "github"
},
"original": {

@@ -292,11 +291,11 @@

]
},
"locked": {
"lastModified": 1747372754,
"narHash": "sha256-2Y53NGIX2vxfie1rOW0Qb86vjRZ7ngizoo+bnXU9D9k=",
"lastModified": 1750779888,
"narHash": "sha256-wibppH3g/E2lxU43ZQHC5yA/7kIKLGxVEnsnVK1BtRg=",
"owner": "cachix",
"repo": "git-hooks.nix",
"rev": "80479b6ec16fefd9c1db3ea13aeb038c60530f46",
"rev": "16ec914f6fb6f599ce988427d9d94efddf25fe6d",
"type": "github"
},
"original": {

@@ -327,31 +326,24 @@

"type": "github"
}
},
"libgit2": {
"flake": false,
"locked": {
"lastModified": 1697646580,
"narHash": "sha256-oX4Z3S9WtJlwvj0uH9HlYcWv+x1hqp8mhXl7HsLu2f0=",
"owner": "libgit2",
"repo": "libgit2",
"rev": "45fd9ed7ae1a9b74b957ef4f337bc3c8b3df01b5",
"type": "github"
},
"original": {
"owner": "libgit2",
"repo": "libgit2",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": [
"cachix",
"devenv"
"devenv",
"flake-compat"
],
"flake-parts": "flake-parts_2",
"libgit2": "libgit2",
"nixpkgs": "nixpkgs_3",
"git-hooks-nix": [
"cachix",
"devenv",
"git-hooks"
],
"nixpkgs": [
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-23-11": [
"cachix",
"devenv"

@@ -359,34 +351,30 @@

"nixpkgs-regression": [
"cachix",
"devenv"
],
"pre-commit-hooks": [
"cachix",
"devenv"
]
},
"locked": {
"lastModified": 1745930071,
"narHash": "sha256-bYyjarS3qSNqxfgc89IoVz8cAFDkF9yPE63EJr+h50s=",
"owner": "domenkozar",
"lastModified": 1752773918,
"narHash": "sha256-dOi/M6yNeuJlj88exI+7k154z+hAhFcuB8tZktiW7rg=",
"owner": "cachix",
"repo": "nix",
"rev": "b455edf3505f1bf0172b39a735caef94687d0d9c",
"rev": "031c3cf42d2e9391eee373507d8c12e0f9606779",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.24",
"owner": "cachix",
"ref": "devenv-2.30",
"repo": "nix",
"type": "github"
}
},
"nix-filter": {
"locked": {
"lastModified": 1731533336,
"narHash": "sha256-oRam5PS1vcrr5UPgALW0eo1m/5/pls27Z/pabHNy2Ms=",
"lastModified": 1757882181,
"narHash": "sha256-+cCxYIh2UNalTz364p+QYmWHs0P+6wDhiWR4jDIKQIU=",
"owner": "numtide",
"repo": "nix-filter",
"rev": "f7653272fd234696ae94229839a99b73c9ab7de0",
"rev": "59c44d1909c72441144b93cf0f054be7fe764de5",
"type": "github"
},
"original": {

@@ -404,11 +392,11 @@

]
},
"locked": {
"lastModified": 1729742964,
"narHash": "sha256-B4mzTcQ0FZHdpeWcpDYPERtyjJd/NIuaQ9+BV1h+MpA=",
"lastModified": 1737420293,
"narHash": "sha256-F1G5ifvqTpJq7fdkT34e/Jy9VCyzd5XfJ9TO8fHhJWE=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "e04df33f62cdcf93d73e9a04142464753a16db67",
"rev": "f4158fa080ef4503c8f4c820967d946c2af31ec9",
"type": "github"
},
"original": {

@@ -419,11 +407,11 @@

},
"nixpkgs": {
"locked": {
"lastModified": 1726042813,
"narHash": "sha256-LnNKCCxnwgF+575y0pxUdlGZBO/ru1CtGHIqQVfvjlA=",
"lastModified": 1751949589,
"narHash": "sha256-mgFxAPLWw0Kq+C8P3dRrZrOYEQXOtKuYVlo9xvPntt8=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "159be5db480d1df880a0135ca0bfed84c2f88353",
"rev": "9b008d60392981ad674e04016d25619281550a9d",
"type": "github"
},
"original": {

@@ -435,27 +423,27 @@

},
"nixpkgs-stable": {
"locked": {
"lastModified": 1724316499,
"narHash": "sha256-Qb9MhKBUTCfWg/wqqaxt89Xfi6qTD3XpTzQ9eXi3JmE=",
"lastModified": 1751741127,
"narHash": "sha256-t75Shs76NgxjZSgvvZZ9qOmz5zuBE8buUaYD28BMTxg=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "797f7dc49e0bc7fab4b57c021cdf68f595e47841",
"rev": "29e290002bfff26af1db6f64d070698019460302",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-24.05",
"ref": "nixos-25.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1733212471,
"narHash": "sha256-M1+uCoV5igihRfcUKrr1riygbe73/dzNnzPsmaLCmpo=",
"lastModified": 1754214453,
"narHash": "sha256-Q/I2xJn/j1wpkGhWkQnm20nShYnG7TI99foDBpXm1SY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "55d15ad12a74eb7d4646254e13638ad0c4128776",
"rev": "5b09dc45f24cf32316283e62aec81ffee3c3e376",
"type": "github"
},
"original": {

@@ -467,43 +455,11 @@

},
"nixpkgs_3": {
"locked": {
"lastModified": 1717432640,
"narHash": "sha256-+f9c4/ZX5MWDOuB1rKoWj+lBNm0z0rs4CK47HBLxy1o=",
"lastModified": 1760256791,
"narHash": "sha256-uTpzDHRASEDeFUuToWSQ46Re8beXyG9dx4W36FQa0/c=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "88269ab3044128b7c2f4c7d68448b2fb50456870",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "release-24.05",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_4": {
"locked": {
"lastModified": 1748190013,
"narHash": "sha256-R5HJFflOfsP5FBtk+zE8FpL8uqE7n62jqOsADvVshhE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "62b852f6c6742134ade1abdd2a21685fd617a291",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_5": {
"locked": {
"lastModified": 1751498133,
"narHash": "sha256-QWJ+NQbMU+NcU2xiyo7SNox1fAuwksGlQhpzBl76g1I=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "d55716bb59b91ae9d1ced4b1ccdea7a442ecbfdb",
"rev": "832e3b6db48508ae436c2c7bfc0cf914eac6938e",
"type": "github"
},
"original": {

@@ -513,23 +469,6 @@

"type": "github"
}
},
"rocksdb": {
"flake": false,
"locked": {
"lastModified": 1741308171,
"narHash": "sha256-YdBvdQ75UJg5ffwNjxizpviCVwVDJnBkM8ZtGIduMgY=",
"ref": "v9.11.1",
"rev": "3ce04794bcfbbb0d2e6f81ae35fc4acf688b6986",
"revCount": 13177,
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
},
"original": {
"ref": "v9.11.1",
"type": "git",
"url": "https://forgejo.ellis.link/continuwuation/rocksdb"
}
},
"root": {
"inputs": {
"attic": "attic",

@@ -539,18 +478,17 @@

"flake-compat": "flake-compat_3",
"flake-utils": "flake-utils",
"nix-filter": "nix-filter",
"nixpkgs": "nixpkgs_5",
"rocksdb": "rocksdb"
"nixpkgs": "nixpkgs_3"
}
},
"rust-analyzer-src": {
"flake": false,
"locked": {
"lastModified": 1751433876,
"narHash": "sha256-IsdwOcvLLDDlkFNwhdD5BZy20okIQL01+UQ7Kxbqh8s=",
"lastModified": 1760260966,
"narHash": "sha256-pOVvZz/aa+laeaUKyE6PtBevdo4rywMwjhWdSZE/O1c=",
"owner": "rust-lang",
"repo": "rust-analyzer",
"rev": "11d45c881389dae90b0da5a94cde52c79d0fc7ef",
"rev": "c5181dbbe33af6f21b9d83e02fdb6fda298a3b65",
"type": "github"
},
"original": {
30 flake.nix
@@ -16,10 +16,6 @@

flake-utils.url = "github:numtide/flake-utils?ref=main";
nix-filter.url = "github:numtide/nix-filter?ref=main";
nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
rocksdb = {
url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=v9.11.1";
flake = false;
};
};

outputs =

@@ -31,20 +27,24 @@

inherit system;
};

fnx = inputs.fenix.packages.${system};
# The Rust toolchain to use
toolchain = inputs.fenix.packages.${system}.fromToolchainFile {
file = ./rust-toolchain.toml;
toolchain = fnx.combine [
(fnx.fromToolchainFile {
file = ./rust-toolchain.toml;

# See also `rust-toolchain.toml`
sha256 = "sha256-KUm16pHj+cRedf8vxs/Hd2YWxpOrWZ7UOrwhILdSJBU=";
};
# See also `rust-toolchain.toml`
sha256 = "sha256-+9FmLhAOezBZCOziO0Qct1NOrfpjNsXxc/8I0c7BdKE=";
})
fnx.complete.rustfmt
];

mkScope =
pkgs:
pkgs.lib.makeScope pkgs.newScope (self: {
inherit pkgs inputs;
craneLib = (inputs.crane.mkLib pkgs).overrideToolchain (_: toolchain);
main = self.callPackage ./nix/pkgs/main { };
main = self.callPackage ./pkg/nix/pkgs/main { };
liburing = pkgs.liburing.overrideAttrs {
# Tests weren't building
outputs = [

@@ -61,8 +61,14 @@

inherit (self) liburing;
}).overrideAttrs
(old: {
src = inputs.rocksdb;
version = "v9.11.1";
src = pkgsHost.fetchFromGitea {
domain = "forgejo.ellis.link";
owner = "continuwuation";
repo = "rocksdb";
rev = "10.5.fb";
sha256 = "sha256-X4ApGLkHF9ceBtBg77dimEpu720I79ffLoyPa8JMHaU=";
};
version = "v10.5.fb";
cmakeFlags =
pkgs.lib.subtractLists [
# No real reason to have snappy or zlib, no one uses this
@@ -1,25 +1,24 @@

[Unit]

Description=Continuwuity - Matrix homeserver
Documentation=https://continuwuity.org/
Wants=network-online.target
After=network-online.target
Documentation=https://continuwuity.org/
Alias=matrix-conduwuit.service

[Service]
DynamicUser=yes
User=conduwuit
Group=conduwuit
Type=notify
Type=notify-reload
ReloadSignal=SIGUSR1

Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml"

Environment="CONTINUWUITY_LOG_TO_JOURNALD=true"
Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N"
Environment="CONTINUWUITY_DATABASE_PATH=/var/lib/conduwuit"

ExecStart=/usr/sbin/conduwuit

ReadWritePaths=/var/lib/conduwuit /etc/conduwuit
ExecStart=/usr/bin/conduwuit

AmbientCapabilities=
CapabilityBoundingSet=

@@ -52,16 +51,17 @@ SystemCallArchitectures=native

SystemCallFilter=@system-service @resources
SystemCallFilter=~@clock @debug @module @mount @reboot @swap @cpu-emulation @obsolete @timer @chown @setuid @privileged @keyring @ipc
SystemCallErrorNumber=EPERM
#StateDirectory=conduwuit

StateDirectory=conduwuit
ConfigurationDirectory=conduwuit
RuntimeDirectory=conduwuit
RuntimeDirectoryMode=0750

Restart=on-failure
RestartSec=5

TimeoutStopSec=2m
TimeoutStartSec=2m
TimeoutStopSec=4m
TimeoutStartSec=4m

StartLimitInterval=1m
StartLimitBurst=5
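With `Type=notify-reload` and `ReloadSignal=SIGUSR1`, systemd can deliver the config-reload signal itself, so a running server can be asked to re-read its configuration without a restart (assuming the unit is installed under the name used elsewhere in this changeset):

```bash
sudo systemctl reload conduwuit.service
```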

@@ -1,12 +1,28 @@

# Continuwuity for Debian

This document provides information about downloading and deploying the Debian package. You can also use this guide for other `apt`-based distributions such as Ubuntu.
This document provides information about downloading and deploying the Debian package. You can also use this guide for other deb-based distributions such as Ubuntu.

### Installation

See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.
To add the Continuwuation apt repository:
```bash
# Replace with `"dev"` for bleeding-edge builds at your own risk
export COMPONENT="stable"
# Import the Continuwuation signing key
sudo curl https://forgejo.ellis.link/api/packages/continuwuation/debian/repository.key -o /etc/apt/keyrings/forgejo-continuwuation.asc
# Add a new apt source list pointing to the repository
echo "deb [signed-by=/etc/apt/keyrings/forgejo-continuwuation.asc] https://forgejo.ellis.link/api/packages/continuwuation/debian $(lsb_release -sc) $COMPONENT" | sudo tee /etc/apt/sources.list.d/continuwuation.list
# Update remote package lists
sudo apt update
```

No `apt` repository is currently available. This feature is in development.
To install continuwuity:
```bash
sudo apt install continuwuity
```
The `continuwuity` package conflicts with the old `conduwuit` package and will remove it automatically when installed.

See the [generic deployment guide](../deploying/generic.md) for additional information about using the Debian package.

### Configuration

@@ -16,7 +32,7 @@ ### Configuration

### Running

The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/sbin/conduwuit`.
The package uses the [`conduwuit.service`](../configuration/examples.md#example-systemd-unit-file) systemd unit file to start and stop Continuwuity. The binary installs at `/usr/bin/conduwuit`.
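A typical first start under the packaged unit might look like the following; the service name matches the unit file referenced above:

```bash
sudo systemctl enable --now conduwuit.service
sudo journalctl -u conduwuit -f
```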

By default, this package assumes that Continuwuity runs behind a reverse proxy. The default configuration options apply (listening on `localhost` and TCP port `6167`). Matrix federation requires a valid domain name and TLS. To federate properly, you must set up TLS certificates and certificate renewal.

20 pkg/debian/postinst Normal file
@@ -0,0 +1,20 @@

#!/bin/sh
set -e

# TODO: implement debconf support that is maintainable without duplicating the config
#. /usr/share/debconf/confmodule

CONDUWUIT_DATABASE_PATH=/var/lib/conduwuit
CONDUWUIT_CONFIG_PATH=/etc/conduwuit

case "$1" in
configure)
echo ''
echo 'Make sure you edit the example config at /etc/conduwuit/conduwuit.toml before starting!'
echo 'To start the server, run: systemctl start conduwuit.service'
echo ''
;;
esac

#DEBHELPER#
@@ -20,24 +20,18 @@ case $1 in

if [ -d "$CONDUWUIT_CONFIG_PATH" ]; then
if test -L "$CONDUWUIT_CONFIG_PATH"; then
echo "Deleting conduwuit configuration files"
echo "Deleting continuwuity configuration files"
rm -v -r "$CONDUWUIT_CONFIG_PATH"
fi
fi

if [ -d "$CONDUWUIT_DATABASE_PATH" ]; then
if test -L "$CONDUWUIT_DATABASE_PATH"; then
echo "Deleting conduwuit database directory"
echo "Deleting continuwuity database directory"
rm -r "$CONDUWUIT_DATABASE_PATH"
fi
fi

if [ -d "$CONDUWUIT_DATABASE_PATH_SYMLINK" ]; then
if test -L "$CONDUWUIT_DATABASE_SYMLINK"; then
echo "Removing matrix-conduit symlink"
rm -r "$CONDUWUIT_DATABASE_PATH_SYMLINK"
fi
fi
;;
esac

79 pkg/fedora/continuwuity.spec.rpkg Normal file
@@ -0,0 +1,79 @@

# This should be run using rpkg: https://docs.pagure.org/rpkg
# it requires Internet access and is not suitable for Fedora main repos

Name: continuwuity
Version: {{{ git_repo_version }}}
Release: 1%{?dist}
Summary: Very cool Matrix chat homeserver written in Rust

License: Apache-2.0 AND MIT

URL: https://continuwuity.org
VCS: {{{ git_repo_vcs }}}
Source: {{{ git_repo_pack }}}

BuildRequires: cargo-rpm-macros >= 25
BuildRequires: systemd-rpm-macros
# Needed to build rust-librocksdb-sys
BuildRequires: clang
BuildRequires: liburing-devel

Requires: liburing
Requires: glibc
Requires: libstdc++

%global _description %{expand:
A cool hard fork of Conduit, a Matrix homeserver written in Rust}

%description %{_description}

%prep
{{{ git_repo_setup_macro }}}
%cargo_prep -N
# Perform an online build so Git dependencies can be retrieved
sed -i 's/^offline = true$//' .cargo/config.toml

%build
%cargo_build

# Here's the one legally required mystery incantation in this file.
# Some of our dependencies have source files which are (for some reason) marked as executable.
# Files in .cargo/registry/ are copied into /usr/src/ by the debuginfo machinery
# at the end of the build step, and then the BRP shebang mangling script checks
# the entire buildroot to find executable files, and fails the build because
# it thinks Rust's file attributes are shebangs because they start with `#!`.
# So we have to clear the executable bit on all of them before that happens.
find .cargo/registry/ -executable -name "*.rs" -exec chmod -x {} +

# TODO: this fails currently because it's forced to run in offline mode
# {cargo_license -- --no-dev} > LICENSE.dependencies

%install
install -Dpm0755 target/rpm/conduwuit -t %{buildroot}%{_bindir}
install -Dpm0644 pkg/conduwuit.service -t %{buildroot}%{_unitdir}
install -Dpm0644 conduwuit-example.toml %{buildroot}%{_sysconfdir}/conduwuit/conduwuit.toml

%files
%license LICENSE
%license src/core/matrix/state_res/LICENSE
%doc CODE_OF_CONDUCT.md
%doc CONTRIBUTING.md
%doc README.md
%doc SECURITY.md
%config %{_sysconfdir}/conduwuit/conduwuit.toml

%{_bindir}/conduwuit
%{_unitdir}/conduwuit.service
# Do not create /var/lib/conduwuit, systemd will create it if necessary

%post
%systemd_post conduwuit.service

%preun
%systemd_preun conduwuit.service

%postun
%systemd_postun_with_restart conduwuit.service

%changelog
{{{ git_repo_changelog }}}
104 renovate.json
@@ -1,26 +1,84 @@

{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [
"config:recommended"
],
"lockFileMaintenance": {
"enabled": true,
"schedule": [
"at any time"
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended", "replacements:all"],
"osvVulnerabilityAlerts": true,
"lockFileMaintenance": {
"enabled": true,
"schedule": ["at any time"]
},
"platformAutomerge": true,
"nix": {
"enabled": true
},
"labels": ["Dependencies", "Dependencies/Renovate"],
"ignoreDeps": [
"tikv-jemallocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry",
"opentelemetry_sdk",
"opentelemetry-jaeger",
"tracing-opentelemetry"
],
"github-actions": {
"enabled": true,
"managerFilePatterns": [
"/(^|/)\\.forgejo/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.forgejo/actions/[^/]+/action\\.ya?ml$/",
"/(^|/)\\.github/workflows/[^/]+\\.ya?ml$/",
"/(^|/)\\.github/actions/[^/]+/action\\.ya?ml$/"
]
},
"packageRules": [
{
"description": "Batch patch-level Rust dependency updates",
"matchManagers": ["cargo"],
"matchUpdateTypes": ["patch"],
"groupName": "rust-patch-updates"
},
{
"description": "Limit concurrent Cargo PRs",
"matchManagers": ["cargo"],
"prConcurrentLimit": 5
},
{
"description": "Group Rust toolchain updates into a single PR",
"matchManagers": ["custom.regex"],
"matchPackageNames": ["rust", "rustc", "cargo"],
"groupName": "rust-toolchain"
},
{
"description": "Batch minor and patch GitHub Actions updates",
"matchManagers": ["github-actions"],
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Pin forgejo artifact actions to prevent breaking changes",
"matchManagers": ["github-actions"],
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
"enabled": false
},
{
"description": "Auto-merge renovatebot docker image updates",
"matchDatasources": ["docker"],
"matchPackageNames": ["ghcr.io/renovatebot/renovate"],
"automerge": true,
"automergeStrategy": "fast-forward",
"extends": ["schedule:earlyMondays"]
}
],
"customManagers": [
{
"customType": "regex",
"description": "Update _VERSION variables in Dockerfiles",
"managerFilePatterns": [
"/(^|/)([Dd]ocker|[Cc]ontainer)file[^/]*$/",
"/(^|/|\\.)([Dd]ocker|[Cc]ontainer)file$/"
],
"matchStrings": [
"# renovate: datasource=(?<datasource>[a-zA-Z0-9-._]+?) depName=(?<depName>[^\\s]+?)(?: (lookupName|packageName)=(?<packageName>[^\\s]+?))?(?: versioning=(?<versioning>[^\\s]+?))?(?: extractVersion=(?<extractVersion>[^\\s]+?))?(?: registryUrl=(?<registryUrl>[^\\s]+?))?\\s+(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_VERSION[ =][\"']?(?<currentValue>.+?)[\"']?\\s+(?:(?:ENV\\s+|ARG\\s+)?[A-Za-z0-9_]+?_CHECKSUM[ =][\"']?(?<currentDigest>.+?)[\"']?\\s)?"
]
}
]
},
"nix": {
"enabled": true
},
"labels": [
"dependencies",
"github_actions"
],
"ignoreDeps": [
"tikv-jemllocator",
"tikv-jemalloc-sys",
"tikv-jemalloc-ctl",
"opentelemetry-rust",
"tracing-opentelemetry"
]
}
|
||||
|
||||
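For reference, the custom regex manager above is designed to pick up annotated version pins in Dockerfiles. A hypothetical line it would match (datasource, depName, and version are invented for illustration, not taken from this repository):

```dockerfile
# renovate: datasource=github-releases depName=tianon/gosu
ARG GOSU_VERSION="1.17"
```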
@@ -9,13 +9,16 @@
# If you're having trouble making the relevant changes, bug a maintainer.

[toolchain]
channel = "1.87.0"
profile = "minimal"
channel = "1.89.0"
components = [
# For rust-analyzer
"rust-src",
"rust-analyzer",
# For CI and editors
"rustfmt",
"clippy",
# you have to install rustfmt nightly yourself (if you're not on NixOS)
#
# The rust-toolchain.toml file doesn't provide any syntax for specifying components from different toolchains
# "rustfmt"
]

@@ -85,10 +85,11 @@ futures.workspace = true
log.workspace = true
ruma.workspace = true
serde_json.workspace = true
serde_yaml.workspace = true
serde_yml.workspace = true
tokio.workspace = true
tracing-subscriber.workspace = true
tracing.workspace = true
ctor.workspace = true

[lints]
workspace = true

@@ -16,7 +16,7 @@ pub(super) async fn register(&self) -> Result {

let range = 1..checked!(body_len - 1)?;
let appservice_config_body = body[range].join("\n");
let parsed_config = serde_yaml::from_str(&appservice_config_body);
let parsed_config = serde_yml::from_str(&appservice_config_body);
match parsed_config {
| Err(e) => return Err!("Could not parse appservice config as YAML: {e}"),
| Ok(registration) => match self
@@ -57,7 +57,7 @@ pub(super) async fn show_appservice_config(&self, appservice_identifier: String)
{
| None => return Err!("Appservice does not exist."),
| Some(config) => {
let config_str = serde_yaml::to_string(&config)?;
let config_str = serde_yml::to_string(&config)?;
write!(self, "Config for {appservice_identifier}:\n\n```yaml\n{config_str}\n```")
},
}

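The serde_yaml to serde_yml rename is mechanical; as the two hunks above show, the fork keeps the same from_str/to_string surface. A minimal round-trip sketch (the Registration struct here is a hypothetical stand-in, not the real appservice type):

```rust
use serde::{Deserialize, Serialize};

// Hypothetical stand-in for the appservice registration struct.
#[derive(Debug, Serialize, Deserialize)]
struct Registration {
    id: String,
    url: String,
}

fn main() -> Result<(), serde_yml::Error> {
    let yaml = "id: my_bridge\nurl: http://localhost:9000";
    // Parse YAML exactly as the admin command does after the crate swap...
    let reg: Registration = serde_yml::from_str(yaml)?;
    // ...and serialize it back, as show_appservice_config does.
    println!("{}", serde_yml::to_string(&reg)?);
    Ok(())
}
```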
@@ -281,15 +281,8 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)]
};

info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.await?;

let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server and handled as backfilled";
let msg = "Got PDU from specified server";
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
},
}
@@ -639,6 +632,7 @@ pub(super) async fn force_set_room_state_from_server(
.add_pdu_outlier(&event_id, &value);
}

info!("Resolving new room state");
let new_room_state = self
.services
.rooms
@@ -646,7 +640,7 @@ pub(super) async fn force_set_room_state_from_server(
.resolve_state(&room_id, &room_version, state)
.await?;

info!("Forcing new room state");
info!("Compressing new room state");
let HashSetCompressStateEvent {
shortstatehash: short_state_hash,
added,
@@ -660,6 +654,7 @@ pub(super) async fn force_set_room_state_from_server(

let state_lock = self.services.rooms.state.mutex.lock(&*room_id).await;

info!("Forcing new room state");
self.services
.rooms
.state

@@ -29,6 +29,8 @@

pub(crate) const PAGE_SIZE: usize = 100;

use ctor::{ctor, dtor};

conduwuit::mod_ctor! {}
conduwuit::mod_dtor! {}
conduwuit::rustc_flags_capture! {}

@@ -57,5 +57,5 @@ pub(super) async fn pdus(
.try_collect()
.await?;

self.write_str(&format!("{result:#?}")).await
self.write_str(&format!("```\n{result:#?}\n```")).await
}

@@ -1,8 +1,8 @@
use std::{collections::BTreeMap, fmt::Write as _};

use api::client::{
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url,
update_displayname,
full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room,
update_avatar_url, update_displayname,
};
use conduwuit::{
Err, Result, debug, debug_warn, error, info, is_equal_to,
@@ -68,7 +68,8 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
// Create user
self.services
.users
.create(&user_id, Some(password.as_str()))?;
.create(&user_id, Some(password.as_str()), None)
.await?;

// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -178,7 +179,11 @@ pub(super) async fn create_user(&self, username: String, password: Option<String
.await
.is_ok_and(is_equal_to!(1))
{
self.services.admin.make_user_admin(&user_id).await?;
self.services
.admin
.make_user_admin(&user_id)
.boxed()
.await?;
warn!("Granting {user_id} admin privileges as the first user");
}
} else {
@@ -216,7 +221,9 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) ->
.collect()
.await;

full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
full_user_deactivate(self.services, &user_id, &all_joined_rooms)
.boxed()
.await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms).await;
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms).await;
leave_all_rooms(self.services, &user_id).await;
@@ -284,6 +291,7 @@ pub(super) async fn reset_password(&self, username: String, password: Option<Str
.services
.users
.set_password(&user_id, Some(new_password.as_str()))
.await
{
| Err(e) => return Err!("Couldn't reset the password for user {user_id}: {e}"),
| Ok(()) => {
@@ -374,7 +382,9 @@ pub(super) async fn deactivate_all(&self, no_leave_rooms: bool, force: bool) ->
.collect()
.await;

full_user_deactivate(self.services, &user_id, &all_joined_rooms).await?;
full_user_deactivate(self.services, &user_id, &all_joined_rooms)
.boxed()
.await?;
update_displayname(self.services, &user_id, None, &all_joined_rooms).await;
update_avatar_url(self.services, &user_id, None, None, &all_joined_rooms)
.await;
@@ -754,7 +764,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
&user_id,
&room_id,
Some(&room_id),
&state_lock,
)
.await?;
@@ -774,7 +784,11 @@ pub(super) async fn make_user_admin(&self, user_id: String) -> Result {
"Parsed user_id must be a local user"
);

self.services.admin.make_user_admin(&user_id).await?;
self.services
.admin
.make_user_admin(&user_id)
.boxed()
.await?;

self.write_str(&format!("{user_id} has been granted admin privileges.",))
.await
@@ -899,7 +913,13 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
);

let redaction_event_id = {
let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await;
let state_lock = self
.services
.rooms
.state
.mutex
.lock(&event.room_id_or_hash())
.await;

self.services
.rooms
@@ -913,7 +933,7 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
})
},
event.sender(),
event.room_id(),
Some(&event.room_id_or_hash()),
&state_lock,
)
.await?
@@ -924,3 +944,29 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result {
))
.await
}

#[admin_command]
pub(super) async fn force_leave_remote_room(
&self,
user_id: String,
room_id: OwnedRoomOrAliasId,
) -> Result {
let user_id = parse_local_user_id(self.services, &user_id)?;
let (room_id, _) = self
.services
.rooms
.alias
.resolve_with_servers(&room_id, None)
.await?;

assert!(
self.services.globals.user_is_local(&user_id),
"Parsed user_id must be a local user"
);
remote_leave_room(self.services, &user_id, &room_id, None)
.boxed()
.await?;

self.write_str(&format!("{user_id} has left {room_id}.",))
.await
}

@@ -103,6 +103,12 @@ pub enum UserCommand {
room_id: OwnedRoomOrAliasId,
},

/// - Manually leave a remote room for a local user.
ForceLeaveRemoteRoom {
user_id: String,
room_id: OwnedRoomOrAliasId,
},

/// - Forces the specified user to drop their power levels to the room
/// default, if their permissions allow and the auth check permits
ForceDemote {

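With the variant above in place, the admin-room interface should expose it as a kebab-case subcommand, roughly like this (invocation shape inferred from the other clap-derived user commands, so treat it as illustrative):

```
!admin users force-leave-remote-room @alice:example.org #somewhere:remote.example
```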
@@ -49,6 +49,9 @@ jemalloc_stats = [
"conduwuit-core/jemalloc_stats",
"conduwuit-service/jemalloc_stats",
]
ldap = [
"conduwuit-service/ldap"
]
release_max_log_level = [
"conduwuit-core/release_max_log_level",
"conduwuit-service/release_max_log_level",
@@ -90,6 +93,7 @@ serde.workspace = true
sha1.workspace = true
tokio.workspace = true
tracing.workspace = true
ctor.workspace = true

[lints]
workspace = true

@@ -373,7 +373,7 @@ pub(crate) async fn register_route(
let password = if is_guest { None } else { body.password.as_deref() };

// Create user
services.users.create(&user_id, password)?;
services.users.create(&user_id, password, None).await?;

// Default to pretty displayname
let mut displayname = user_id.localpart().to_owned();
@@ -405,41 +405,36 @@ pub(crate) async fn register_route(
)
.await?;

if (!is_guest && body.inhibit_login)
// Generate new device id if the user didn't specify one
let no_device = body.inhibit_login
|| body
.appservice_info
.as_ref()
.is_some_and(|appservice| appservice.registration.device_management)
{
return Ok(register::v3::Response {
access_token: None,
user_id,
device_id: None,
refresh_token: None,
expires_in: None,
});
}
.is_some_and(|aps| aps.registration.device_management);
let (token, device) = if !no_device {
// Don't create a device for inhibited logins
let device_id = if is_guest { None } else { body.device_id.clone() }
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());

// Generate new device id if the user didn't specify one
let device_id = if is_guest { None } else { body.device_id.clone() }
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
// Generate new token for the device
let new_token = utils::random_string(TOKEN_LENGTH);

// Generate new token for the device
let token = utils::random_string(TOKEN_LENGTH);

// Create device for this account
services
.users
.create_device(
&user_id,
&device_id,
&token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;

debug_info!(%user_id, %device_id, "User account was created");
// Create device for this account
services
.users
.create_device(
&user_id,
&device_id,
&new_token,
body.initial_device_display_name.clone(),
Some(client.to_string()),
)
.await?;
debug_info!(%user_id, %device_id, "User account was created");
(Some(new_token), Some(device_id))
} else {
(None, None)
};

let device_display_name = body.initial_device_display_name.as_deref().unwrap_or("");

@@ -505,7 +500,7 @@ pub(crate) async fn register_route(
.await
.is_ok_and(is_equal_to!(1))
{
services.admin.make_user_admin(&user_id).await?;
services.admin.make_user_admin(&user_id).boxed().await?;
warn!("Granting {user_id} admin privileges as the first user");
} else if services.config.suspend_on_register {
// This is not an admin, suspend them.
@@ -583,9 +578,9 @@ pub(crate) async fn register_route(
}

Ok(register::v3::Response {
access_token: Some(token),
access_token: token,
user_id,
device_id: Some(device_id),
device_id: device,
refresh_token: None,
expires_in: None,
})
@@ -659,7 +654,8 @@ pub(crate) async fn change_password_route(

services
.users
.set_password(sender_user, Some(&body.new_password))?;
.set_password(sender_user, Some(&body.new_password))
.await?;

if body.logout_devices {
// Logout all devices except the current one
@@ -928,7 +924,7 @@ pub async fn full_user_deactivate(
.build_and_append_pdu(
PduBuilder::state(String::new(), &power_levels_content),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await

src/api/client/admin/mod.rs (Normal file, 3 lines added)
@@ -0,0 +1,3 @@
mod suspend;

pub(crate) use self::suspend::*;
src/api/client/admin/suspend.rs (Normal file, 89 lines added)
@@ -0,0 +1,89 @@
use axum::extract::State;
use conduwuit::{Err, Result};
use futures::future::{join, join3};
use ruma::api::client::admin::{get_suspended, set_suspended};

use crate::Ruma;

/// # `GET /_matrix/client/v1/admin/suspend/{userId}`
///
/// Check the suspension status of a target user
pub(crate) async fn get_suspended_status(
State(services): State<crate::State>,
body: Ruma<get_suspended::v1::Request>,
) -> Result<get_suspended::v1::Response> {
let sender_user = body.sender_user();

let (admin, active) =
join(services.users.is_admin(sender_user), services.users.is_active(&body.user_id)).await;
if !admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only check the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
Ok(get_suspended::v1::Response::new(
services.users.is_suspended(&body.user_id).await?,
))
}

/// # `PUT /_matrix/client/v1/admin/suspend/{userId}`
///
/// Set the suspension status of a target user
pub(crate) async fn put_suspended_status(
State(services): State<crate::State>,
body: Ruma<set_suspended::v1::Request>,
) -> Result<set_suspended::v1::Response> {
let sender_user = body.sender_user();

let (sender_admin, active, target_admin) = join3(
services.users.is_admin(sender_user),
services.users.is_active(&body.user_id),
services.users.is_admin(&body.user_id),
)
.await;

if !sender_admin {
return Err!(Request(Forbidden("Only server administrators can use this endpoint")));
}
if !services.globals.user_is_local(&body.user_id) {
return Err!(Request(InvalidParam("Can only set the suspended status of local users")));
}
if !active {
return Err!(Request(NotFound("Unknown user")));
}
if body.user_id == *sender_user {
return Err!(Request(Forbidden("You cannot suspend yourself")));
}
if target_admin {
return Err!(Request(Forbidden("You cannot suspend another server administrator")));
}
if services.users.is_suspended(&body.user_id).await? == body.suspended {
// No change
return Ok(set_suspended::v1::Response::new(body.suspended));
}

let action = if body.suspended {
services
.users
.suspend_account(&body.user_id, sender_user)
.await;
"suspended"
} else {
services.users.unsuspend_account(&body.user_id).await;
"unsuspended"
};

if services.config.admin_room_notices {
// Notify the admin room that an account has been un/suspended
services
.admin
.send_text(&format!("{} has been {} by {}.", body.user_id, action, sender_user))
.await;
}

Ok(set_suspended::v1::Response::new(body.suspended))
}
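On the wire, the pair of handlers above exchange a single boolean. A hypothetical session against the new endpoint (field names taken from the handlers; MSC4323 is unstable and subject to change):

```
PUT /_matrix/client/v1/admin/suspend/@spammer:example.org
{"suspended": true}

200 OK
{"suspended": true}
```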
@@ -19,7 +19,7 @@
/// of this server.
pub(crate) async fn get_capabilities_route(
State(services): State<crate::State>,
_body: Ruma<get_capabilities::v3::Request>,
body: Ruma<get_capabilities::v3::Request>,
) -> Result<get_capabilities::v3::Response> {
let available: BTreeMap<RoomVersionId, RoomVersionStability> =
Server::available_room_versions().collect();
@@ -45,5 +45,14 @@ pub(crate) async fn get_capabilities_route(
json!({"enabled": services.config.forget_forced_upon_leave}),
)?;

if services
.users
.is_admin(body.sender_user.as_ref().unwrap())
.await
{
// Advertise suspension API
capabilities.set("uk.timedout.msc4323", json!({"suspend": true, "lock": false}))?;
}

Ok(get_capabilities::v3::Response { capabilities })
}

@@ -69,7 +69,7 @@ pub(crate) async fn get_context_route(

let (base_id, base_pdu, visible) = try_join3(base_id, base_pdu, visible).await?;

if base_pdu.room_id != *room_id || base_pdu.event_id != *event_id {
if base_pdu.room_id_or_hash() != *room_id || base_pdu.event_id != *event_id {
return Err!(Request(NotFound("Base event not found.")));
}

@@ -130,7 +130,7 @@ pub(crate) async fn get_context_route(
let state_at = events_after
.last()
.map(ref_at!(1))
.map_or(body.event_id.as_ref(), |pdu| pdu.event_id.as_ref());
.map_or_else(|| body.event_id.as_ref(), |pdu| pdu.event_id.as_ref());

let state_ids = services
.rooms

@@ -64,10 +64,14 @@ pub(crate) async fn create_content_route(
media_id: &utils::random_string(MXC_LENGTH),
};

services
if let Err(e) = services
.media
.create(mxc, Some(user), Some(&content_disposition), content_type, &body.file)
.await?;
.await
{
err!("Failed to save uploaded media: {e}");
return Err!(Request(Unknown("Failed to save uploaded media")));
}

let blurhash = body.generate_blurhash.then(|| {
services

@@ -49,7 +49,7 @@ pub(crate) async fn ban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

@@ -4,11 +4,14 @@
Err, Result, debug_error, err, info,
matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder},
};
use futures::{FutureExt, join};
use futures::FutureExt;
use ruma::{
OwnedServerName, RoomId, UserId,
api::{client::membership::invite_user, federation::membership::create_invite},
events::room::member::{MembershipState, RoomMemberEventContent},
events::{
invite_permission_config::FilterLevel,
room::member::{MembershipState, RoomMemberEventContent},
},
};
use service::Services;

@@ -47,22 +50,21 @@ pub(crate) async fn invite_user_route(
.await?;

match &body.recipient {
| invite_user::v3::InvitationRecipient::UserId { user_id } => {
let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id);
let recipient_ignored_by_sender =
services.users.user_is_ignored(user_id, sender_user);
| invite_user::v3::InvitationRecipient::UserId { user_id: recipient_user } => {
let sender_filter_level = services
.users
.invite_filter_level(recipient_user, sender_user)
.await;

let (sender_ignored_recipient, recipient_ignored_by_sender) =
join!(sender_ignored_recipient, recipient_ignored_by_sender);

if sender_ignored_recipient {
if !matches!(sender_filter_level, FilterLevel::Allow) {
// drop invites if the sender has the recipient filtered
return Ok(invite_user::v3::Response {});
}

if let Ok(target_user_membership) = services
.rooms
.state_accessor
.get_member(&body.room_id, user_id)
.get_member(&body.room_id, recipient_user)
.await
{
if target_user_membership.membership == MembershipState::Ban {
@@ -70,16 +72,27 @@ pub(crate) async fn invite_user_route(
}
}

if recipient_ignored_by_sender {
// silently drop the invite to the recipient if they've been ignored by the
// sender, pretend it worked
return Ok(invite_user::v3::Response {});
// check for blocked invites if the recipient is a local user.
if services.globals.user_is_local(recipient_user) {
let recipient_filter_level = services
.users
.invite_filter_level(sender_user, recipient_user)
.await;

// ignored invites aren't handled here
// since the recipient's membership should still be changed to `invite`.
// they're filtered out in the individual /sync handlers.
if matches!(recipient_filter_level, FilterLevel::Block) {
return Err!(Request(InviteBlocked(
"{recipient_user} has blocked invites from you."
)));
}
}

invite_helper(
&services,
sender_user,
user_id,
recipient_user,
&body.room_id,
body.reason.clone(),
false,
@@ -98,7 +111,7 @@ pub(crate) async fn invite_user_route(
pub(crate) async fn invite_helper(
services: &Services,
sender_user: &UserId,
user_id: &UserId,
recipient_user: &UserId,
room_id: &RoomId,
reason: Option<String>,
is_direct: bool,
@@ -111,12 +124,12 @@ pub(crate) async fn invite_helper(
return Err!(Request(Forbidden("Invites are not allowed on this server.")));
}

if !services.globals.user_is_local(user_id) {
if !services.globals.user_is_local(recipient_user) {
let (pdu, pdu_json, invite_room_state) = {
let state_lock = services.rooms.state.mutex.lock(room_id).await;

let content = RoomMemberEventContent {
avatar_url: services.users.avatar_url(user_id).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -126,14 +139,14 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.create_hash_and_sign_event(
PduBuilder::state(user_id.to_string(), &content),
PduBuilder::state(recipient_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;

let invite_room_state = services.rooms.state.summary_stripped(&pdu).await;
let invite_room_state = services.rooms.state.summary_stripped(&pdu, room_id).await;

drop(state_lock);

@@ -144,7 +157,7 @@ pub(crate) async fn invite_helper(

let response = services
.sending
.send_federation_request(user_id.server_name(), create_invite::v2::Request {
.send_federation_request(recipient_user.server_name(), create_invite::v2::Request {
room_id: room_id.to_owned(),
event_id: (*pdu.event_id).to_owned(),
room_version: room_version_id.clone(),
@@ -173,7 +186,7 @@ pub(crate) async fn invite_helper(
return Err!(Request(BadJson(warn!(
%pdu.event_id, %event_id,
"Server {} sent event with wrong event ID",
user_id.server_name()
recipient_user.server_name()
))));
}

@@ -213,9 +226,9 @@ pub(crate) async fn invite_helper(
let state_lock = services.rooms.state.mutex.lock(room_id).await;

let content = RoomMemberEventContent {
displayname: services.users.displayname(user_id).await.ok(),
avatar_url: services.users.avatar_url(user_id).await.ok(),
blurhash: services.users.blurhash(user_id).await.ok(),
displayname: services.users.displayname(recipient_user).await.ok(),
avatar_url: services.users.avatar_url(recipient_user).await.ok(),
blurhash: services.users.blurhash(recipient_user).await.ok(),
is_direct: Some(is_direct),
reason,
..RoomMemberEventContent::new(MembershipState::Invite)
@@ -225,9 +238,9 @@ pub(crate) async fn invite_helper(
.rooms
.timeline
.build_and_append_pdu(
PduBuilder::state(user_id.to_string(), &content),
PduBuilder::state(recipient_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await?;

@@ -18,7 +18,7 @@
},
warn,
};
use futures::{FutureExt, StreamExt};
use futures::{FutureExt, StreamExt, TryFutureExt};
use ruma::{
CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId,
RoomVersionId, UserId,
@@ -156,31 +156,34 @@ pub(crate) async fn join_room_by_id_or_alias_route(
.await?;

let mut servers = body.via.clone();
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);
if servers.is_empty() {
debug!("No via servers provided for join, injecting some.");
servers.extend(
services
.rooms
.state_cache
.servers_invite_via(&room_id)
.map(ToOwned::to_owned)
.collect::<Vec<_>>()
.await,
);

servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);
servers.extend(
services
.rooms
.state_cache
.invite_state(sender_user, &room_id)
.await
.unwrap_or_default()
.iter()
.filter_map(|event| event.get_field("sender").ok().flatten())
.filter_map(|sender: &str| UserId::parse(sender).ok())
.map(|user| user.server_name().to_owned()),
);

if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
if let Some(server) = room_id.server_name() {
servers.push(server.to_owned());
}
}

servers.sort_unstable();
@@ -310,11 +313,14 @@ pub async fn join_room_by_id_helper(
}
}

let local_join = server_in_room
|| servers.is_empty()
|| (servers.len() == 1 && services.globals.server_is_ours(&servers[0]));
if !server_in_room && servers.is_empty() {
return Err!(Request(NotFound(
"No servers were provided to assist in joining the room remotely, and we are not \
already participating in the room."
)));
}

if local_join {
if server_in_room {
join_room_by_id_helper_local(
services,
sender_user,
@@ -553,6 +559,10 @@ async fn join_room_by_id_helper_remote(
services
.server_keys
.validate_and_add_event_id_no_fetch(pdu, &room_version_id)
.inspect_err(|e| {
debug_warn!("Could not validate send_join response room_state event: {e:?}");
})
.inspect(|_| debug!("Completed validating send_join response room_state event"))
})
.ready_filter_map(Result::ok)
.fold(HashMap::new(), |mut state, (event_id, value)| async move {
@@ -563,7 +573,6 @@ async fn join_room_by_id_helper_remote(
return state;
},
};

services.rooms.outlier.add_pdu_outlier(&event_id, &value);
if let Some(state_key) = &pdu.state_key {
let shortstatekey = services
@@ -574,7 +583,6 @@ async fn join_room_by_id_helper_remote(

state.insert(shortstatekey, pdu.event_id.clone());
}

state
})
.await;
@@ -595,6 +603,7 @@ async fn join_room_by_id_helper_remote(
})
.ready_filter_map(Result::ok)
.ready_for_each(|(event_id, value)| {
trace!(%event_id, "Adding PDU as an outlier from send_join auth_chain");
services.rooms.outlier.add_pdu_outlier(&event_id, &value);
})
.await;
@@ -615,6 +624,9 @@ async fn join_room_by_id_helper_remote(
&parsed_join_pdu,
None, // TODO: third party invite
|k, s| state_fetch(k.clone(), s.into()),
&state_fetch(StateEventType::RoomCreate, "".into())
.await
.expect("create event is missing from send_join auth"),
)
.await
.map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?;
@@ -649,7 +661,7 @@ async fn join_room_by_id_helper_remote(
.force_state(room_id, statehash_before_join, added, removed, &state_lock)
.await?;

info!("Updating joined counts for new room");
debug!("Updating joined counts for new room");
services
.rooms
.state_cache
@@ -662,7 +674,7 @@ async fn join_room_by_id_helper_remote(
let statehash_after_join = services
.rooms
.state
.append_to_state(&parsed_join_pdu)
.append_to_state(&parsed_join_pdu, room_id)
.await?;

info!("Appending new room join event");
@@ -674,6 +686,7 @@ async fn join_room_by_id_helper_remote(
join_event,
once(parsed_join_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;

@@ -729,6 +742,7 @@ async fn join_room_by_id_helper_local(
.iter()
.stream()
.any(|restriction_room_id| {
trace!("Checking if {sender_user} is joined to {restriction_room_id}");
services
.rooms
.state_cache
@@ -741,6 +755,7 @@ async fn join_room_by_id_helper_local(
.state_cache
.local_users_in_room(room_id)
.filter(|user| {
trace!("Checking if {user} can invite {sender_user} to {room_id}");
services.rooms.state_accessor.user_can_invite(
room_id,
user,
@@ -753,6 +768,7 @@ async fn join_room_by_id_helper_local(
.await
.map(ToOwned::to_owned)
} else {
trace!("No restriction rooms are joined by {sender_user}");
None
}
};
@@ -773,7 +789,7 @@ async fn join_room_by_id_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await

@@ -54,7 +54,7 @@ pub(crate) async fn kick_user_route(
..event
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

@@ -373,7 +373,7 @@ async fn knock_room_helper_local(
.build_and_append_pdu(
PduBuilder::state(sender_user.to_string(), &content),
sender_user,
room_id,
Some(room_id),
&state_lock,
)
.await
@@ -502,6 +502,7 @@ async fn knock_room_helper_local(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;

@@ -672,7 +673,7 @@ async fn knock_room_helper_remote(
let statehash_after_knock = services
.rooms
.state
.append_to_state(&parsed_knock_pdu)
.append_to_state(&parsed_knock_pdu, room_id)
.await?;

info!("Updating membership locally to knock state with provided stripped state events");
@@ -701,6 +702,7 @@ async fn knock_room_helper_remote(
knock_event,
once(parsed_knock_pdu.event_id.borrow()),
&state_lock,
room_id,
)
.await?;


@@ -206,7 +206,7 @@ pub async fn leave_room(
..event
}),
user_id,
room_id,
Some(room_id),
&state_lock,
)
.await?;
@@ -215,7 +215,7 @@ pub async fn leave_room(
Ok(())
}

async fn remote_leave_room(
pub async fn remote_leave_room(
services: &Services,
user_id: &UserId,
room_id: &RoomId,

@@ -29,7 +29,7 @@
};
pub use self::{
join::join_room_by_id_helper,
leave::{leave_all_rooms, leave_room},
leave::{leave_all_rooms, leave_room, remote_leave_room},
};
use crate::{Ruma, client::full_user_deactivate};

@@ -69,11 +69,11 @@ pub(crate) async fn banned_room_check(
}

if let Some(room_id) = room_id {
if services.rooms.metadata.is_banned(room_id).await
|| services
.moderation
.is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid"))
{
let room_banned = services.rooms.metadata.is_banned(room_id).await;
let server_banned = room_id.server_name().is_some_and(|server_name| {
services.moderation.is_remote_server_forbidden(server_name)
});
if room_banned || server_banned {
warn!(
"User {user_id} who is not an admin attempted to send an invite for or \
attempted to join a banned room or banned room server name: {room_id}"
@@ -106,7 +106,6 @@ pub(crate) async fn banned_room_check(
.boxed()
.await?;
}

return Err!(Request(Forbidden("This room is banned on this homeserver.")));
}
} else if let Some(server_name) = server_name {

@@ -47,7 +47,7 @@ pub(crate) async fn unban_user_route(
..current_member_content
}),
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

@@ -8,7 +8,7 @@
ref_at,
utils::{
IterStream, ReadyExt,
result::{FlatOk, LogErr},
result::LogErr,
stream::{BroadbandExt, TryIgnore, WidebandExt},
},
};
@@ -30,6 +30,7 @@
events::{
AnyStateEvent, StateEventType,
TimelineEventType::{self, *},
invite_permission_config::FilterLevel,
},
serde::Raw,
};
@@ -91,7 +92,7 @@ pub(crate) async fn get_message_events_route(
| Direction::Backward => PduCount::max(),
});

let to: Option<PduCount> = body.to.as_deref().map(str::parse).flat_ok();
let to: Option<PduCount> = body.to.as_deref().map(str::parse).transpose()?;

let limit: usize = body
.limit
@@ -181,7 +182,7 @@ pub(crate) async fn get_message_events_route(

Ok(get_message_events::v3::Response {
start: from.to_string(),
end: next_token.as_ref().map(ToString::to_string),
end: next_token.as_ref().map(PduCount::to_string),
chunk,
state,
})
@@ -267,7 +268,7 @@ pub(crate) async fn ignored_filter(
pub(crate) async fn is_ignored_pdu<Pdu>(
services: &Services,
event: &Pdu,
user_id: &UserId,
recipient_user: &UserId,
) -> bool
where
Pdu: Event + Send + Sync,
@@ -278,20 +279,29 @@ pub(crate) async fn is_ignored_pdu<Pdu>(
return true;
}

let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();

let ignored_server = services
let sender_user = event.sender();
let type_ignored = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok();
let server_ignored = services
.moderation
.is_remote_server_ignored(event.sender().server_name());
.is_remote_server_ignored(sender_user.server_name());
let user_ignored = services
.users
.user_is_ignored(sender_user, recipient_user)
.await;

if ignored_type
&& (ignored_server
|| (!services.config.send_messages_from_ignored_users_to_client
&& services
.users
.user_is_ignored(event.sender(), user_id)
.await))
{
if !type_ignored {
// We cannot safely ignore this type
return false;
}

if server_ignored {
// the sender's server is ignored, so ignore this event
return true;
}

if user_ignored && !services.config.send_messages_from_ignored_users_to_client {
// the recipient of this PDU has the sender ignored, and we're not
// configured to send ignored messages to clients
return true;
}

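The early returns above unroll what was previously a single compound condition. As a sketch, the decision they encode is (names invented for illustration):

```rust
/// Drop an event only when its type is safely ignorable AND either the
/// sender's server is ignored, or the recipient ignores the sender and the
/// server isn't configured to forward ignored users' messages anyway.
fn should_drop(
    type_ignored: bool,
    server_ignored: bool,
    user_ignored: bool,
    forward_to_client: bool, // config.send_messages_from_ignored_users_to_client
) -> bool {
    type_ignored && (server_ignored || (user_ignored && !forward_to_client))
}
```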
@@ -309,7 +319,7 @@ pub(crate) async fn visibility_filter(
services
.rooms
.state_accessor
.user_can_see_event(user_id, pdu.room_id(), pdu.event_id())
.user_can_see_event(user_id, &pdu.room_id_or_hash(), pdu.event_id())
.await
.then_some(item)
}
@@ -320,7 +330,30 @@ pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Opti
filter.matches(pdu).then_some(item)
}

#[cfg_attr(debug_assertions, conduwuit::ctor)]
#[inline]
pub(crate) async fn is_ignored_invite(
services: &Services,
recipient_user: &UserId,
room_id: &RoomId,
) -> bool {
let Ok(sender_user) = services
.rooms
.state_cache
.invite_sender(recipient_user, room_id)
.await
else {
// the invite may have been sent before the invite_sender table existed.
// assume it's not ignored
return false;
};

services
.users
.invite_filter_level(&sender_user, recipient_user)
.await == FilterLevel::Ignore
}

#[cfg_attr(debug_assertions, ctor::ctor)]
fn _is_sorted() {
debug_assert!(
IGNORED_MESSAGE_TYPES.is_sorted(),

@@ -1,5 +1,6 @@
pub(super) mod account;
pub(super) mod account_data;
pub(super) mod admin;
pub(super) mod alias;
pub(super) mod appservice;
pub(super) mod backup;
@@ -42,6 +43,7 @@
pub use account::full_user_deactivate;
pub(super) use account::*;
pub(super) use account_data::*;
pub(super) use admin::*;
pub(super) use alias::*;
pub(super) use appservice::*;
pub(super) use backup::*;
@@ -54,7 +56,7 @@
pub(super) use media::*;
pub(super) use media_legacy::*;
pub(super) use membership::*;
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room};
pub use membership::{join_room_by_id_helper, leave_all_rooms, leave_room, remote_leave_room};
pub(super) use message::*;
pub(super) use openid::*;
pub(super) use presence::*;

@@ -1,5 +1,3 @@
use std::collections::BTreeMap;

use axum::extract::State;
use conduwuit::{
Err, Result,
@@ -90,7 +88,7 @@ pub(crate) async fn get_displayname_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}

services
@@ -189,7 +187,7 @@ pub(crate) async fn get_avatar_url_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}

services
@@ -226,7 +224,8 @@ pub(crate) async fn get_avatar_url_route(

/// # `GET /_matrix/client/v3/profile/{userId}`
///
/// Returns the displayname, avatar_url, blurhash, and tz of the user.
/// Returns the displayname, avatar_url, blurhash, and custom profile fields of
/// the user.
///
/// - If user is on another server and we do not have a local copy already,
/// fetch profile over federation.
@@ -248,7 +247,7 @@ pub(crate) async fn get_profile_route(
.await
{
if !services.users.exists(&body.user_id).await {
services.users.create(&body.user_id, None)?;
services.users.create(&body.user_id, None, None).await?;
}

services
@@ -260,9 +259,6 @@ pub(crate) async fn get_profile_route(
services
.users
.set_blurhash(&body.user_id, response.blurhash.clone());
services
.users
.set_timezone(&body.user_id, response.tz.clone());

for (profile_key, profile_key_value) in &response.custom_profile_fields {
services.users.set_profile_key(
@@ -276,7 +272,6 @@ pub(crate) async fn get_profile_route(
displayname: response.displayname,
avatar_url: response.avatar_url,
blurhash: response.blurhash,
tz: response.tz,
custom_profile_fields: response.custom_profile_fields,
});
}
@@ -288,21 +283,11 @@ pub(crate) async fn get_profile_route(
return Err!(Request(NotFound("Profile was not found.")));
}

let mut custom_profile_fields: BTreeMap<String, serde_json::Value> = services
.users
.all_profile_keys(&body.user_id)
.collect()
.await;

// services.users.timezone will collect the MSC4175 timezone key if it exists
custom_profile_fields.remove("us.cloke.msc4175.tz");
custom_profile_fields.remove("m.tz");

let (avatar_url, blurhash, displayname, tz) = join4(
let (avatar_url, blurhash, displayname, custom_profile_fields) = join4(
services.users.avatar_url(&body.user_id).ok(),
services.users.blurhash(&body.user_id).ok(),
services.users.displayname(&body.user_id).ok(),
services.users.timezone(&body.user_id).ok(),
services.users.all_profile_keys(&body.user_id).collect(),
)
.await;

@@ -310,7 +295,6 @@ pub(crate) async fn get_profile_route(
avatar_url,
blurhash,
displayname,
tz,
custom_profile_fields,
})
}
@@ -423,7 +407,7 @@ pub async fn update_all_rooms(
if let Err(e) = services
.rooms
.timeline
.build_and_append_pdu(pdu_builder, user_id, room_id, &state_lock)
.build_and_append_pdu(pdu_builder, user_id, Some(room_id), &state_lock)
.await
{
warn!(%user_id, %room_id, "Failed to update/send new profile join membership update in room: {e}");

@@ -36,7 +36,7 @@ pub(crate) async fn redact_event_route(
})
},
sender_user,
&body.room_id,
Some(&body.room_id),
&state_lock,
)
.await?;

@@ -117,7 +117,7 @@ async fn paginate_relations_with_filter(
| Direction::Backward => PduCount::max(),
});

let to: Option<PduCount> = to.map(str::parse).flat_ok();
let to: Option<PduCount> = to.map(str::parse).transpose()?;

// Use limit or else 30, with maximum 100
let limit: usize = limit
@@ -129,6 +129,11 @@ async fn paginate_relations_with_filter(
// Spec (v1.10) recommends depth of at least 3
let depth: u8 = if recurse { 3 } else { 1 };

// Check if this is a thread request
let is_thread = filter_rel_type
.as_ref()
.is_some_and(|rel| *rel == RelationType::Thread);

let events: Vec<_> = services
.rooms
.pdu_metadata
@@ -152,23 +157,58 @@ async fn paginate_relations_with_filter(
.collect()
.await;

let next_batch = match dir {
| Direction::Forward => events.last(),
| Direction::Backward => events.first(),
// For threads, check if we should include the root event
let mut root_event = None;
if is_thread && dir == Direction::Backward {
// Check if we've reached the beginning of the thread
// (fewer events than requested means we've exhausted the thread)
if events.len() < limit {
// Try to get the thread root event
if let Ok(root_pdu) = services.rooms.timeline.get_pdu(target).await {
// Check visibility
if services
.rooms
.state_accessor
.user_can_see_event(sender_user, room_id, target)
.await
{
// Store the root event to add to the response
root_event = Some(root_pdu);
}
}
}
}
.map(at!(0))
.as_ref()
.map(ToString::to_string);

// Determine if there are more events to fetch
let has_more = if root_event.is_some() {
false // We've included the root, no more events
} else {
// Check if we got a full page of results (might be more)
events.len() >= limit
};

let next_batch = if has_more {
match dir {
| Direction::Forward => events.last(),
| Direction::Backward => events.first(),
}
.map(|(count, _)| count.to_string())
} else {
None
};

// Build the response chunk with thread root if needed
let chunk: Vec<_> = root_event
.into_iter()
.map(Event::into_format)
.chain(events.into_iter().map(at!(1)).map(Event::into_format))
.collect();

Ok(get_relating_events::v1::Response {
next_batch,
prev_batch: from.map(Into::into),
recursion_depth: recurse.then_some(depth.into()),
chunk: events
.into_iter()
.map(at!(1))
.map(Event::into_format)
.collect(),
chunk,
})
}

@@ -182,7 +222,7 @@ async fn visibility_filter<Pdu: Event + Send + Sync>(
services
.rooms
.state_accessor
.user_can_see_event(sender_user, pdu.room_id(), pdu.event_id())
.user_can_see_event(sender_user, &pdu.room_id_or_hash(), pdu.event_id())
.await
.then_some(item)
}

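The thread-aware branch above also changes when a next_batch token is emitted. A compact sketch of that decision (helper and parameters invented for illustration):

```rust
/// Mirror of the pagination logic above: attaching the thread root ends
/// pagination; otherwise a token is returned only when the page came back
/// full, taken from the boundary event's count.
fn next_batch(page_len: usize, limit: usize, root_attached: bool, boundary: Option<u64>) -> Option<String> {
    let has_more = !root_attached && page_len >= limit;
    has_more.then(|| boundary.map(|c| c.to_string())).flatten()
}
```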
@@ -1,8 +1,8 @@
use std::{fmt::Write as _, ops::Mul, time::Duration};
use std::{fmt::Write as _, time::Duration};

use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit::{Err, Event, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt};
use conduwuit_service::Services;
use rand::Rng;
use ruma::{
@@ -12,7 +12,6 @@
room::{report_content, report_room},
},
events::{Mentions, room::message::RoomMessageEventContent},
int,
};
use tokio::time::sleep;

@@ -25,7 +24,6 @@ struct Report {
user_id: Option<OwnedUserId>,
report_type: String,
reason: Option<String>,
score: Option<ruma::Int>,
}

/// # `POST /_matrix/client/v3/rooms/{roomId}/report`
@@ -50,6 +48,15 @@ pub(crate) async fn report_room_route(

delay_response().await;

// We log this early in case the room ID does actually exist, in which case
// admins who scan their logs can see the report and choose to investigate at
// their discretion.
info!(
"Received room report by user {sender_user} for room {} with reason: \"{}\"",
body.room_id,
body.reason.as_deref().unwrap_or("")
);

if !services
.rooms
.state_cache
@@ -60,11 +67,6 @@ pub(crate) async fn report_room_route(
"Room does not exist to us, no local users have joined at all"
)));
}
info!(
"Received room report by user {sender_user} for room {} with reason: \"{}\"",
body.room_id,
body.reason.as_deref().unwrap_or("")
);

let report = Report {
sender: sender_user.to_owned(),
@@ -73,7 +75,6 @@ pub(crate) async fn report_room_route(
user_id: None,
report_type: "room".to_owned(),
reason: body.reason.clone(),
score: None,
};

services.admin.send_message(build_report(report)).await.ok();
@@ -109,7 +110,6 @@ pub(crate) async fn report_event_route(
&body.room_id,
sender_user,
body.reason.as_ref(),
body.score,
&pdu,
)
.await?;
@@ -127,7 +127,6 @@ pub(crate) async fn report_event_route(
user_id: None,
report_type: "event".to_owned(),
reason: body.reason.clone(),
score: body.score,
};
services.admin.send_message(build_report(report)).await.ok();

@@ -166,7 +165,6 @@ pub(crate) async fn report_user_route(
user_id: Some(body.user_id.clone()),
report_type: "user".to_owned(),
reason: body.reason.clone(),
score: None,
};

info!(
@@ -192,7 +190,6 @@ async fn is_event_report_valid(
room_id: &RoomId,
sender_user: &UserId,
reason: Option<&String>,
score: Option<ruma::Int>,
pdu: &PduEvent,
) -> Result<()> {
debug_info!(
@@ -200,14 +197,10 @@ async fn is_event_report_valid(
valid"
);

if room_id != pdu.room_id {
if room_id != pdu.room_id_or_hash() {
return Err!(Request(NotFound("Event ID does not belong to the reported room",)));
}

if score.is_some_and(|s| s > int!(0) || s < int!(-100)) {
return Err!(Request(InvalidParam("Invalid score, must be within 0 to -100",)));
}

if reason.as_ref().is_some_and(|s| s.len() > 750) {
return Err!(Request(
InvalidParam("Reason too long, should be 750 characters or fewer",)
@@ -240,9 +233,6 @@ fn build_report(report: Report) -> RoomMessageEventContent {
if report.event_id.is_some() {
let _ = writeln!(text, "- Reported Event ID: `{}`", report.event_id.unwrap());
}
if let Some(score) = report.score {
let _ = writeln!(text, "- User-supplied offensiveness score: {}%", score.mul(int!(-1)));
}
if let Some(reason) = report.reason {
let _ = writeln!(text, "- Report Reason: {reason}");
}

@@ -1,10 +1,10 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};

use axum::extract::State;
use conduwuit::{
    Err, Result, debug_info, debug_warn, err, info,
    Err, Result, RoomVersion, debug, debug_info, debug_warn, err, info,
    matrix::{StateKey, pdu::PduBuilder},
    warn,
    trace, warn,
};
use conduwuit_service::{Services, appservice::RegistrationInfo};
use futures::FutureExt;
@@ -13,6 +13,7 @@
    api::client::room::{self, create_room},
    events::{
        TimelineEventType,
        invite_permission_config::FilterLevel,
        room::{
            canonical_alias::RoomCanonicalAliasEventContent,
            create::RoomCreateEventContent,
@@ -49,6 +50,7 @@
/// - Send events implied by `name` and `topic`
/// - Send invite events
#[allow(clippy::large_stack_frames)]
#[allow(clippy::cognitive_complexity)]
pub(crate) async fn create_room_route(
    State(services): State<crate::State>,
    body: Ruma<create_room::v3::Request>,
@@ -68,51 +70,6 @@ pub(crate) async fn create_room_route(
        return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
    }

    let room_id: OwnedRoomId = match &body.room_id {
        | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?,
        | _ => RoomId::new(&services.server.name),
    };

    // check if room ID doesn't already exist instead of erroring on auth check
    if services.rooms.short.get_shortroomid(&room_id).await.is_ok() {
        return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
    }

    if body.visibility == room::Visibility::Public
        && services.server.config.lockdown_public_room_directory
        && !services.users.is_admin(sender_user).await
        && body.appservice_info.is_none()
    {
        warn!(
            "Non-admin user {sender_user} tried to publish {room_id} to the room directory \
             while \"lockdown_public_room_directory\" is enabled"
        );

        if services.server.config.admin_room_notices {
            services
                .admin
                .notice(&format!(
                    "Non-admin user {sender_user} tried to publish {room_id} to the room \
                     directory while \"lockdown_public_room_directory\" is enabled"
                ))
                .await;
        }

        return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
    }
    let _short_id = services
        .rooms
        .short
        .get_or_create_shortroomid(&room_id)
        .await;
    let state_lock = services.rooms.state.mutex.lock(&room_id).await;

    let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
        | Some(alias) =>
            Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
        | _ => None,
    };

    let room_version = match body.room_version.clone() {
        | Some(room_version) =>
            if services.server.supported_room_version(&room_version) {
@@ -124,6 +81,86 @@ pub(crate) async fn create_room_route(
        },
        | None => services.server.config.default_room_version.clone(),
    };
    let room_features = RoomVersion::new(&room_version)?;

    let room_id: Option<OwnedRoomId> = if !room_features.room_ids_as_hashes {
        match &body.room_id {
            | Some(custom_room_id) => Some(custom_room_id_check(&services, custom_room_id)?),
            | None => Some(RoomId::new(services.globals.server_name())),
        }
    } else {
        None
    };

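The shape of the change above: in room versions whose room IDs are derived from the create event (the v12-style "room IDs as hashes" feature), the room ID cannot exist before the create event does, so it becomes an Option and stays None until the create PDU has been built. A minimal sketch of that gating, with the feature struct and helper names as illustrative stand-ins:

struct RoomFeatures { room_ids_as_hashes: bool }

fn allocate_room_id(features: &RoomFeatures, custom: Option<String>, server: &str) -> Option<String> {
    if features.room_ids_as_hashes {
        // v12-style: the ID is derived from the create event later; nothing to allocate now.
        None
    } else {
        // Classic rooms: honour a requested custom ID, else mint a random local one.
        Some(custom.unwrap_or_else(|| format!("!{}:{}", "randomlocalpart", server)))
    }
}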
    // check if room ID doesn't already exist instead of erroring on auth check
    if let Some(ref room_id) = room_id {
        if services.rooms.short.get_shortroomid(room_id).await.is_ok() {
            return Err!(Request(RoomInUse("Room with that custom room ID already exists",)));
        }
    }

    if body.visibility == room::Visibility::Public
        && services.server.config.lockdown_public_room_directory
        && !services.users.is_admin(sender_user).await
        && body.appservice_info.is_none()
    {
        warn!(
            "Non-admin user {sender_user} tried to publish {room_id:?} to the room directory \
             while \"lockdown_public_room_directory\" is enabled"
        );

        if services.server.config.admin_room_notices {
            services
                .admin
                .notice(&format!(
                    "Non-admin user {sender_user} tried to publish {room_id:?} to the room \
                     directory while \"lockdown_public_room_directory\" is enabled"
                ))
                .await;
        }

        return Err!(Request(Forbidden("Publishing rooms to the room directory is not allowed")));
    }

    let mut invitees = BTreeSet::new();

    for recipient_user in &body.invite {
        if !matches!(
            services
                .users
                .invite_filter_level(recipient_user, sender_user)
                .await,
            FilterLevel::Allow
        ) {
            // drop invites if the creator has them blocked
            continue;
        }

        // if the recipient of the invite is local and has the sender blocked, error
        // out. if the recipient is remote we can't tell yet, and if they're local and
        // have the sender _ignored_ their invite will be filtered out in
        // the handlers for the individual /sync endpoints
        if services.globals.user_is_local(recipient_user)
            && matches!(
                services
                    .users
                    .invite_filter_level(sender_user, recipient_user)
                    .await,
                FilterLevel::Block
            ) {
            return Err!(Request(InviteBlocked(
                "{recipient_user} has blocked invites from you."
            )));
        }

        invitees.insert(recipient_user.clone());
    }

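Two directions are checked for each prospective invitee: if the creator has the recipient filtered, the invite is silently dropped, and if a local recipient has the creator on Block, room creation fails outright. A compact sketch of that decision table, with the enum and helper treated as illustrative:

#[derive(PartialEq)]
enum FilterLevel { Allow, Ignore, Block }

enum InviteDecision { Send, DropSilently, RejectCreation }

fn decide_invite(sender_view: FilterLevel, recipient_view: Option<FilterLevel>) -> InviteDecision {
    // sender_view: how the creator filters the recipient.
    // recipient_view: how a *local* recipient filters the creator; None if remote/unknown.
    if sender_view != FilterLevel::Allow {
        return InviteDecision::DropSilently; // creator blocked them; pretend it worked
    }
    match recipient_view {
        Some(FilterLevel::Block) => InviteDecision::RejectCreation, // surfaced as InviteBlocked
        // Ignore is handled later, in the per-user /sync handlers.
        _ => InviteDecision::Send,
    }
}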
    let alias: Option<OwnedRoomAliasId> = match body.room_alias_name.as_ref() {
        | Some(alias) =>
            Some(room_alias_check(&services, alias, body.appservice_info.as_ref()).await?),
        | _ => None,
    };

    let create_content = match &body.creation_content {
        | Some(content) => {
@@ -164,18 +201,36 @@ pub(crate) async fn create_room_route(
            let content = match room_version {
                | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 =>
                    RoomCreateEventContent::new_v1(sender_user.to_owned()),
                | _ => RoomCreateEventContent::new_v11(),
                | V11 => RoomCreateEventContent::new_v11(),
                | _ => RoomCreateEventContent::new_v12(),
            };
            let mut content =
                serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())
                    .unwrap();
                serde_json::from_str::<CanonicalJsonObject>(to_raw_value(&content)?.get())?;
            content.insert("room_version".into(), json!(room_version.as_str()).try_into()?);
            content
        },
    };

    let state_lock = match room_id.clone() {
        | Some(room_id) => {
            let _short_id = services
                .rooms
                .short
                .get_or_create_shortroomid(&room_id)
                .await;
            services.rooms.state.mutex.lock(&room_id).await
        },
        | None => {
            let temp_room_id = RoomId::new(services.globals.server_name());
            trace!("Locking temporary room state mutex for {temp_room_id}");
            services.rooms.state.mutex.lock(&temp_room_id).await
        },
    };

    // 1. The room create event
    services
    debug!("Creating room create event for {sender_user} in room {room_id:?}");
    let tmp_id = room_id.as_deref();
    let create_event_id = services
        .rooms
        .timeline
        .build_and_append_pdu(
@@ -186,13 +241,26 @@ pub(crate) async fn create_room_route(
            ..Default::default()
        },
        sender_user,
        &room_id,
        tmp_id,
        &state_lock,
    )
    .boxed()
    .await?;
    trace!("Created room create event with ID {}", &create_event_id);
    let room_id = match room_id.clone() {
        | Some(room_id) => room_id,
        | None => {
            let as_room_id = create_event_id.as_str().replace('$', "!");
            trace!("Creating room with v12 room ID {as_room_id}");
            RoomId::parse(&as_room_id)?.to_owned()
        },
    };
    drop(state_lock);
    debug!("Room created with ID {room_id}");
    let state_lock = services.rooms.state.mutex.lock(&room_id).await;

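For hash-based room versions the final room ID only exists once the create event has been built: the server takes the create event's ID and swaps the "$" event sigil for the "!" room sigil, then re-parses and re-locks under the final ID. A small string-level sketch of that derivation:

fn room_id_from_create_event(create_event_id: &str) -> String {
    // "$abc123" (event ID) becomes "!abc123" (room ID) in v12-style rooms.
    create_event_id.replace('$', "!")
}

fn main() {
    assert_eq!(room_id_from_create_event("$deadbeef"), "!deadbeef");
}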
    // 2. Let the room creator join
    debug_info!("Joining {sender_user} to room {room_id}");
    services
        .rooms
        .timeline
@@ -205,7 +273,7 @@ pub(crate) async fn create_room_route(
            ..RoomMemberEventContent::new(MembershipState::Join)
        }),
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -219,26 +287,45 @@ pub(crate) async fn create_room_route(
        | _ => RoomPreset::PrivateChat, // Room visibility should not be custom
    });

    let mut users = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]);
    let mut power_levels_to_grant = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]);

    if preset == RoomPreset::TrustedPrivateChat {
        for invite in &body.invite {
            if services.users.user_is_ignored(sender_user, invite).await {
                continue;
            } else if services.users.user_is_ignored(invite, sender_user).await {
                // silently drop the invite to the recipient if they've been ignored by the
                // sender, pretend it worked
                continue;
            }

            users.insert(invite.clone(), int!(100));
        for recipient_user in &invitees {
            power_levels_to_grant.insert(recipient_user.clone(), int!(100));
        }
    }

    let mut creators: Vec<OwnedUserId> = vec![sender_user.to_owned()];
    // Do we care about additional_creators?
    if room_features.explicitly_privilege_room_creators {
        // Have they been specified?
        if let Some(additional_creators) = create_content.get("additional_creators") {
            // Are they a real array?
            if let Some(additional_creators) = additional_creators.as_array() {
                // Iterate through them
                for creator in additional_creators {
                    // Are they a string?
                    if let Some(creator) = creator.as_str() {
                        // Do they parse into a real user ID?
                        if let Ok(creator) = OwnedUserId::parse(creator) {
                            // Add them to the power levels and creators
                            creators.push(creator.clone());
                        }
                    }
                }
            }
        }
    } else {
        power_levels_to_grant.insert(sender_user.to_owned(), int!(100));
        creators.clear(); // If this vec is not empty, default_power_levels_content will
        // treat this as a v12 room
    }

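The nested checks above defensively narrow untrusted JSON (present? actually an array? strings? valid user IDs?) while silently skipping anything malformed. The same cascade can be written as one iterator chain over serde_json values; a sketch with a simplified user-ID check standing in for OwnedUserId::parse:

use serde_json::Value;

fn additional_creators(create_content: &Value) -> Vec<String> {
    create_content
        .get("additional_creators")
        .and_then(Value::as_array)          // present and actually an array?
        .into_iter()
        .flatten()
        .filter_map(Value::as_str)          // keep only string entries
        .filter(|s| s.starts_with('@') && s.contains(':')) // stand-in for real user ID parsing
        .map(str::to_owned)
        .collect()
}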
    let power_levels_content = default_power_levels_content(
        body.power_level_content_override.as_ref(),
        &body.visibility,
        users,
        power_levels_to_grant,
        creators,
    )?;

    services
@@ -252,7 +339,7 @@ pub(crate) async fn create_room_route(
            ..Default::default()
        },
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -269,7 +356,7 @@ pub(crate) async fn create_room_route(
            alt_aliases: vec![],
        }),
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -292,7 +379,7 @@ pub(crate) async fn create_room_route(
            }),
        ),
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -308,7 +395,7 @@ pub(crate) async fn create_room_route(
            &RoomHistoryVisibilityEventContent::new(HistoryVisibility::Shared),
        ),
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -327,7 +414,7 @@ pub(crate) async fn create_room_route(
            }),
        ),
        sender_user,
        &room_id,
        Some(&room_id),
        &state_lock,
    )
    .boxed()
@@ -363,7 +450,7 @@ pub(crate) async fn create_room_route(
        services
            .rooms
            .timeline
            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)
            .build_and_append_pdu(pdu_builder, sender_user, Some(&room_id), &state_lock)
            .boxed()
            .await?;
    }
@@ -376,7 +463,7 @@ pub(crate) async fn create_room_route(
        .build_and_append_pdu(
            PduBuilder::state(String::new(), &RoomNameEventContent::new(name.clone())),
            sender_user,
            &room_id,
            Some(&room_id),
            &state_lock,
        )
        .boxed()
@@ -390,7 +477,7 @@ pub(crate) async fn create_room_route(
        .build_and_append_pdu(
            PduBuilder::state(String::new(), &RoomTopicEventContent { topic: topic.clone() }),
            sender_user,
            &room_id,
            Some(&room_id),
            &state_lock,
        )
        .boxed()
@@ -399,17 +486,9 @@ pub(crate) async fn create_room_route(

    // 8. Events implied by invite (and TODO: invite_3pid)
    drop(state_lock);
    for user_id in &body.invite {
        if services.users.user_is_ignored(sender_user, user_id).await {
            continue;
        } else if services.users.user_is_ignored(user_id, sender_user).await {
            // silently drop the invite to the recipient if they've been ignored by the
            // sender, pretend it worked
            continue;
        }

    for recipient_user in &invitees {
        if let Err(e) =
            invite_helper(&services, sender_user, user_id, &room_id, None, body.is_direct)
            invite_helper(&services, sender_user, recipient_user, &room_id, None, body.is_direct)
                .boxed()
                .await
        {
@@ -450,6 +529,7 @@ fn default_power_levels_content(
    power_level_content_override: Option<&Raw<RoomPowerLevelsEventContent>>,
    visibility: &room::Visibility,
    users: BTreeMap<OwnedUserId, Int>,
    creators: Vec<OwnedUserId>,
) -> Result<serde_json::Value> {
    let mut power_levels_content =
        serde_json::to_value(RoomPowerLevelsEventContent { users, ..Default::default() })
@@ -499,6 +579,19 @@ fn default_power_levels_content(
        }
    }

    if !creators.is_empty() {
        // Raise the default power level of tombstone to 150
        power_levels_content["events"]["m.room.tombstone"] =
            serde_json::to_value(150).expect("150 is valid Value");
        for creator in creators {
            // Omit creators from the power level list altogether
            power_levels_content["users"]
                .as_object_mut()
                .expect("users is an object")
                .remove(creator.as_str());
        }
    }

    Ok(power_levels_content)
}

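In creator-privileging room versions the creators are not listed in the power-levels users map at all (their authority is implied by the create event rather than by an integer), and the tombstone default is raised to 150 so that ordinary PL-100 members cannot upgrade the room away. A sketch of that post-processing over a plain serde_json value:

use serde_json::{Value, json};

fn privilege_creators(mut power_levels: Value, creators: &[String]) -> Value {
    if !creators.is_empty() {
        // Only creators (implicitly above any integer PL) may send m.room.tombstone.
        power_levels["events"]["m.room.tombstone"] = json!(150);
        if let Some(users) = power_levels["users"].as_object_mut() {
            for creator in creators {
                // Creators are omitted from the users map altogether.
                users.remove(creator);
            }
        }
    }
    power_levels
}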
@@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route(
    let event = services
        .rooms
        .timeline
        .get_pdu(event_id)
        .get_remote_pdu(room_id, event_id)
        .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id))));

    let visible = services
@@ -33,11 +33,6 @@ pub(crate) async fn get_room_event_route(
        return Err!(Request(Forbidden("You don't have permission to view this event.")));
    }

    debug_assert!(
        event.event_id() == event_id && event.room_id() == room_id,
        "Fetched PDU must match requested"
    );

    event.add_age().ok();

    Ok(get_room_event::v3::Response { event: event.into_format() })

@@ -2,7 +2,7 @@

use axum::extract::State;
use conduwuit::{
    Err, Error, Event, Result, debug, err, info,
    Err, Error, Event, Result, RoomVersion, debug, err, info,
    matrix::{StateKey, pdu::PduBuilder},
};
use futures::{FutureExt, StreamExt};
@@ -68,37 +68,77 @@ pub(crate) async fn upgrade_room_route(
        return Err!(Request(UserSuspended("You cannot perform this action while suspended.")));
    }

    // First, check if the user has permission to upgrade the room (send tombstone
    // event)
    let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;

    // Check tombstone permission by attempting to create (but not send) the event
    // Note that this does internally call the policy server with a fake room ID,
    // which may not be good?
    let tombstone_test_result = services
        .rooms
        .timeline
        .create_hash_and_sign_event(
            PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
                body: "This room has been replaced".to_owned(),
                replacement_room: RoomId::new(services.globals.server_name()),
            }),
            sender_user,
            Some(&body.room_id),
            &old_room_state_lock,
        )
        .await;

    if let Err(_e) = tombstone_test_result {
        return Err!(Request(Forbidden("User does not have permission to upgrade this room.")));
    }

    drop(old_room_state_lock);

    // Create a replacement room
    let replacement_room = RoomId::new(services.globals.server_name());
    let room_features = RoomVersion::new(&body.new_version)?;
    let replacement_room_owned = if !room_features.room_ids_as_hashes {
        Some(RoomId::new(services.globals.server_name()))
    } else {
        None
    };
    let replacement_room: Option<&RoomId> = replacement_room_owned.as_ref().map(AsRef::as_ref);
    let replacement_room_tmp = match replacement_room {
        | Some(v) => v,
        | None => &RoomId::new(services.globals.server_name()),
    };

    let _short_id = services
        .rooms
        .short
        .get_or_create_shortroomid(&replacement_room)
        .get_or_create_shortroomid(replacement_room_tmp)
        .await;

    let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;

    // Send a m.room.tombstone event to the old room to indicate that it is not
    // intended to be used any further Fail if the sender does not have the required
    // permissions
    let tombstone_event_id = services
        .rooms
        .timeline
        .build_and_append_pdu(
            PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
                body: "This room has been replaced".to_owned(),
                replacement_room: replacement_room.clone(),
            }),
            sender_user,
            &body.room_id,
            &state_lock,
        )
        .await?;

    // Change lock to replacement room
    drop(state_lock);
    let state_lock = services.rooms.state.mutex.lock(&replacement_room).await;
    // For pre-v12 rooms, send tombstone before creating replacement room
    let tombstone_event_id = if !room_features.room_ids_as_hashes {
        let state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
        // Send a m.room.tombstone event to the old room to indicate that it is not
        // intended to be used any further
        let tombstone_event_id = services
            .rooms
            .timeline
            .build_and_append_pdu(
                PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
                    body: "This room has been replaced".to_owned(),
                    replacement_room: replacement_room.unwrap().to_owned(),
                }),
                sender_user,
                Some(&body.room_id),
                &state_lock,
            )
            .await?;
        // Change lock to replacement room
        drop(state_lock);
        Some(tombstone_event_id)
    } else {
        None
    };
    let state_lock = services.rooms.state.mutex.lock(replacement_room_tmp).await;

    // Get the old room creation event
    let mut create_event_content: CanonicalJsonObject = services
@@ -111,7 +151,7 @@ pub(crate) async fn upgrade_room_route(
    // Use the m.room.tombstone event as the predecessor
    let predecessor = Some(ruma::events::room::create::PreviousRoom::new(
        body.room_id.clone(),
        Some(tombstone_event_id),
        tombstone_event_id,
    ));

    // Send a m.room.create event containing a predecessor field and the applicable
@@ -132,6 +172,7 @@ pub(crate) async fn upgrade_room_route(
            // "creator" key no longer exists in V11 rooms
            create_event_content.remove("creator");
        },
        // TODO(hydra): additional_creators
        }
    }

@@ -159,7 +200,7 @@ pub(crate) async fn upgrade_room_route(
        return Err(Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"));
    }

    services
    let create_event_id = services
        .rooms
        .timeline
        .build_and_append_pdu(
@@ -173,11 +214,18 @@ pub(crate) async fn upgrade_room_route(
            timestamp: None,
        },
        sender_user,
        &replacement_room,
        replacement_room,
        &state_lock,
    )
    .boxed()
    .await?;
    let create_id = create_event_id.as_str().replace('$', "!");
    let (replacement_room, state_lock) = if room_features.room_ids_as_hashes {
        let parsed_room_id = RoomId::parse(&create_id)?;
        (Some(parsed_room_id), services.rooms.state.mutex.lock(parsed_room_id).await)
    } else {
        (replacement_room, state_lock)
    };

    // Join the new room
    services
@@ -204,7 +252,7 @@ pub(crate) async fn upgrade_room_route(
            timestamp: None,
        },
        sender_user,
        &replacement_room,
        replacement_room,
        &state_lock,
    )
    .boxed()
@@ -243,7 +291,7 @@ pub(crate) async fn upgrade_room_route(
            ..Default::default()
        },
        sender_user,
        &replacement_room,
        replacement_room,
        &state_lock,
    )
    .boxed()
@@ -268,7 +316,7 @@ pub(crate) async fn upgrade_room_route(
        services
            .rooms
            .alias
            .set_alias(alias, &replacement_room, sender_user)?;
            .set_alias(alias, replacement_room.unwrap(), sender_user)?;
    }

    // Get the old room power levels
@@ -302,7 +350,7 @@ pub(crate) async fn upgrade_room_route(
            ..power_levels_event_content
        }),
        sender_user,
        &body.room_id,
        Some(&body.room_id),
        &state_lock,
    )
    .boxed()
@@ -310,6 +358,27 @@ pub(crate) async fn upgrade_room_route(

    drop(state_lock);

    // For v12 rooms, send tombstone AFTER creating replacement room
    if room_features.room_ids_as_hashes {
        let old_room_state_lock = services.rooms.state.mutex.lock(&body.room_id).await;
        // For v12 rooms, no event reference in predecessor due to cyclic dependency -
        // could best effort one maybe?
        services
            .rooms
            .timeline
            .build_and_append_pdu(
                PduBuilder::state(StateKey::new(), &RoomTombstoneEventContent {
                    body: "This room has been replaced".to_owned(),
                    replacement_room: replacement_room.unwrap().to_owned(),
                }),
                sender_user,
                Some(&body.room_id),
                &old_room_state_lock,
            )
            .await?;
        drop(old_room_state_lock);
    }

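The upgrade path now forks on the room version: classic rooms tombstone the old room first, so the new create event can cite the tombstone as its predecessor, while hash-ID rooms must create the replacement first (the tombstone needs the new room's ID, which only exists after the create event), leaving the predecessor without an event reference. A sketch of the ordering decision, with illustrative pseudotypes since the real code threads state locks and PduBuilders through:

enum TombstoneOrder {
    BeforeCreate, // pre-v12: tombstone -> create (predecessor can cite the tombstone)
    AfterCreate,  // v12: create -> tombstone (room ID is derived from the create event)
}

fn tombstone_order(room_ids_as_hashes: bool) -> TombstoneOrder {
    if room_ids_as_hashes {
        TombstoneOrder::AfterCreate
    } else {
        TombstoneOrder::BeforeCreate
    }
}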
    // Check if the old room has a space parent, and if so, whether we should update
    // it (m.space.parent, room_id)
    let parents = services
@@ -334,8 +403,9 @@ pub(crate) async fn upgrade_room_route(
            continue;
        };
        debug!(
            "Updating space {space_id} child event for room {} to {replacement_room}",
            &body.room_id
            "Updating space {space_id} child event for room {} to {}",
            &body.room_id,
            replacement_room.unwrap()
        );
        // First, drop the space's child event
        let state_lock = services.rooms.state.mutex.lock(space_id).await;
@@ -352,14 +422,17 @@ pub(crate) async fn upgrade_room_route(
                ..Default::default()
            },
            sender_user,
            space_id,
            Some(space_id),
            &state_lock,
        )
        .boxed()
        .await
        .ok();
        // Now, add a new child event for the replacement room
        debug!("Adding space child event for room {replacement_room} in space {space_id}");
        debug!(
            "Adding space child event for room {} in space {space_id}",
            replacement_room.unwrap()
        );
        services
            .rooms
            .timeline
@@ -372,23 +445,26 @@ pub(crate) async fn upgrade_room_route(
                    suggested: child.suggested,
                })
                .expect("event is valid, we just created it"),
                state_key: Some(replacement_room.as_str().into()),
                state_key: Some(replacement_room.unwrap().as_str().into()),
                ..Default::default()
            },
            sender_user,
            space_id,
            Some(space_id),
            &state_lock,
        )
        .boxed()
        .await
        .ok();
        debug!(
            "Finished updating space {space_id} child event for room {} to {replacement_room}",
            &body.room_id
            "Finished updating space {space_id} child event for room {} to {}",
            &body.room_id,
            replacement_room.unwrap()
        );
        drop(state_lock);
    }

    // Return the replacement room id
    Ok(upgrade_room::v3::Response { replacement_room })
    Ok(upgrade_room::v3::Response {
        replacement_room: replacement_room.unwrap().to_owned(),
    })
}

@@ -80,7 +80,7 @@ pub(crate) async fn send_message_event_route(
            ..Default::default()
        },
        sender_user,
        &body.room_id,
        Some(&body.room_id),
        &state_lock,
    )
    .await?;

@@ -3,13 +3,14 @@
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
    Err, Error, Result, debug, err, info, utils,
    utils::{ReadyExt, hash},
    Err, Error, Result, debug, err, info,
    utils::{self, ReadyExt, hash},
};
use conduwuit_service::uiaa::SESSION_ID_LENGTH;
use conduwuit_core::{debug_error, debug_warn};
use conduwuit_service::{Services, uiaa::SESSION_ID_LENGTH};
use futures::StreamExt;
use ruma::{
    UserId,
    OwnedUserId, UserId,
    api::client::{
        session::{
            get_login_token,
@@ -49,6 +50,154 @@ pub(crate) async fn get_login_types_route(
    ]))
}

/// Authenticates the given user by its ID and its password.
///
/// Returns the user ID if successful, and an error otherwise.
#[tracing::instrument(skip_all, fields(%user_id), name = "password")]
pub(crate) async fn password_login(
    services: &Services,
    user_id: &UserId,
    lowercased_user_id: &UserId,
    password: &str,
) -> Result<OwnedUserId> {
    // Restrict login to accounts only of type 'password', including untyped
    // legacy accounts which are equivalent to 'password'.
    if services
        .users
        .origin(user_id)
        .await
        .is_ok_and(|origin| origin != "password")
    {
        return Err!(Request(Forbidden("Account does not permit password login.")));
    }

    let (hash, user_id) = match services.users.password_hash(user_id).await {
        | Ok(hash) => (hash, user_id),
        | Err(_) => services
            .users
            .password_hash(lowercased_user_id)
            .await
            .map(|hash| (hash, lowercased_user_id))
            .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?,
    };

    if hash.is_empty() {
        return Err!(Request(UserDeactivated("The user has been deactivated")));
    }

    hash::verify_password(password, &hash)
        .inspect_err(|e| debug_error!("{e}"))
        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;

    Ok(user_id.to_owned())
}

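password_login first tries the user ID exactly as supplied and only falls back to the lowercased form if no hash exists for the original, so mixed-case legacy accounts keep working while new lookups are case-normalised; an empty stored hash doubles as the deactivation marker. A sketch of that fallback with a plain map standing in for the user table:

use std::collections::HashMap;

fn lookup_hash<'a>(
    users: &'a HashMap<String, String>, // user_id -> password hash ("" = deactivated)
    user_id: &str,
    lowercased: &str,
) -> Result<(&'a str, String), &'static str> {
    // Prefer the exact ID; fall back to the lowercased one.
    let (hash, matched) = match users.get(user_id) {
        Some(h) => (h.as_str(), user_id),
        None => (
            users.get(lowercased).map(String::as_str).ok_or("Wrong username or password.")?,
            lowercased,
        ),
    };
    if hash.is_empty() {
        return Err("The user has been deactivated");
    }
    Ok((hash, matched.to_owned()))
}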
/// Authenticates the given user through the configured LDAP server.
///
/// Creates the user if the user is found in the LDAP and do not already have an
/// account.
#[tracing::instrument(skip_all, fields(%user_id), name = "ldap")]
pub(super) async fn ldap_login(
    services: &Services,
    user_id: &UserId,
    lowercased_user_id: &UserId,
    password: &str,
) -> Result<OwnedUserId> {
    let (user_dn, is_ldap_admin) = match services.config.ldap.bind_dn.as_ref() {
        | Some(bind_dn) if bind_dn.contains("{username}") =>
            (bind_dn.replace("{username}", lowercased_user_id.localpart()), false),
        | _ => {
            debug!("Searching user in LDAP");

            let dns = services.users.search_ldap(user_id).await?;
            if dns.len() >= 2 {
                return Err!(Ldap("LDAP search returned two or more results"));
            }

            let Some((user_dn, is_admin)) = dns.first() else {
                return password_login(services, user_id, lowercased_user_id, password).await;
            };

            (user_dn.clone(), *is_admin)
        },
    };

    let user_id = services
        .users
        .auth_ldap(&user_dn, password)
        .await
        .map(|()| lowercased_user_id.to_owned())?;

    // LDAP users are automatically created on first login attempt. This is a very
    // common feature that can be seen on many services using a LDAP provider for
    // their users (synapse, Nextcloud, Jellyfin, ...).
    //
    // LDAP users are crated with a dummy password but non empty because an empty
    // password is reserved for deactivated accounts. The conduwuit password field
    // will never be read to login a LDAP user so it's not an issue.
    if !services.users.exists(lowercased_user_id).await {
        services
            .users
            .create(lowercased_user_id, Some("*"), Some("ldap"))
            .await?;
    }

    let is_conduwuit_admin = services.admin.user_is_admin(lowercased_user_id).await;

    if is_ldap_admin && !is_conduwuit_admin {
        Box::pin(services.admin.make_user_admin(lowercased_user_id)).await?;
    } else if !is_ldap_admin && is_conduwuit_admin {
        Box::pin(services.admin.revoke_admin(lowercased_user_id)).await?;
    }

    Ok(user_id)
}

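Note the two side effects above: LDAP accounts are auto-created on first login with the non-empty placeholder hash "*" (an empty hash would read as deactivated), and server-admin status is synchronised in both directions against the LDAP admin flag. A sketch of the admin reconciliation:

enum AdminAction { Promote, Revoke }

fn reconcile_admin(is_ldap_admin: bool, is_server_admin: bool) -> Option<AdminAction> {
    match (is_ldap_admin, is_server_admin) {
        (true, false) => Some(AdminAction::Promote), // LDAP says admin, server doesn't yet
        (false, true) => Some(AdminAction::Revoke),  // server admin no longer admin in LDAP
        _ => None,                                   // already in sync
    }
}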
pub(crate) async fn handle_login(
    services: &Services,
    body: &Ruma<login::v3::Request>,
    identifier: Option<&uiaa::UserIdentifier>,
    password: &str,
    user: Option<&String>,
) -> Result<OwnedUserId> {
    debug!("Got password login type");
    let user_id =
        if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
            UserId::parse_with_server_name(user_id, &services.config.server_name)
        } else if let Some(user) = user {
            UserId::parse_with_server_name(user, &services.config.server_name)
        } else {
            return Err!(Request(Unknown(
                debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
            )));
        }
        .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;

    let lowercased_user_id = UserId::parse_with_server_name(
        user_id.localpart().to_lowercase(),
        &services.config.server_name,
    )?;

    if !services.globals.user_is_local(&user_id)
        || !services.globals.user_is_local(&lowercased_user_id)
    {
        return Err!(Request(Unknown("User ID does not belong to this homeserver")));
    }

    if cfg!(feature = "ldap") && services.config.ldap.enable {
        match Box::pin(ldap_login(services, &user_id, &lowercased_user_id, password)).await {
            | Ok(user_id) => Ok(user_id),
            | Err(err) if services.config.ldap.ldap_only => Err(err),
            | Err(err) => {
                debug_warn!("{err}");
                password_login(services, &user_id, &lowercased_user_id, password).await
            },
        }
    } else {
        password_login(services, &user_id, &lowercased_user_id, password).await
    }
}

/// # `POST /_matrix/client/v3/login`
///
/// Authenticates the user and returns an access token it can use in subsequent
@@ -80,70 +229,7 @@ pub(crate) async fn login_route(
            password,
            user,
            ..
        }) => {
            debug!("Got password login type");
            let user_id =
                if let Some(uiaa::UserIdentifier::UserIdOrLocalpart(user_id)) = identifier {
                    UserId::parse_with_server_name(user_id, &services.config.server_name)
                } else if let Some(user) = user {
                    UserId::parse_with_server_name(user, &services.config.server_name)
                } else {
                    return Err!(Request(Unknown(
                        debug_warn!(?body.login_info, "Valid identifier or username was not provided (invalid or unsupported login type?)")
                    )));
                }
                .map_err(|e| err!(Request(InvalidUsername(warn!("Username is invalid: {e}")))))?;

            let lowercased_user_id = UserId::parse_with_server_name(
                user_id.localpart().to_lowercase(),
                &services.config.server_name,
            )?;

            if !services.globals.user_is_local(&user_id)
                || !services.globals.user_is_local(&lowercased_user_id)
            {
                return Err!(Request(Unknown("User ID does not belong to this homeserver")));
            }

            // first try the username as-is
            let hash = services
                .users
                .password_hash(&user_id)
                .await
                .inspect_err(|e| debug!("{e}"));

            match hash {
                | Ok(hash) => {
                    if hash.is_empty() {
                        return Err!(Request(UserDeactivated("The user has been deactivated")));
                    }

                    hash::verify_password(password, &hash)
                        .inspect_err(|e| debug!("{e}"))
                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;

                    user_id
                },
                | Err(_e) => {
                    let hash_lowercased_user_id = services
                        .users
                        .password_hash(&lowercased_user_id)
                        .await
                        .inspect_err(|e| debug!("{e}"))
                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;

                    if hash_lowercased_user_id.is_empty() {
                        return Err!(Request(UserDeactivated("The user has been deactivated")));
                    }

                    hash::verify_password(password, &hash_lowercased_user_id)
                        .inspect_err(|e| debug!("{e}"))
                        .map_err(|_| err!(Request(Forbidden("Wrong username or password."))))?;

                    lowercased_user_id
                },
            }
        },
        }) => handle_login(&services, &body, identifier.as_ref(), password, user.as_ref()).await?,
        | login::v3::LoginInfo::Token(login::v3::Token { token }) => {
            debug!("Got token login type");
            if !services.server.config.login_via_existing_session {
@@ -198,8 +284,8 @@ pub(crate) async fn login_route(
        .clone()
        .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());

    // Generate a new token for the device
    let token = utils::random_string(TOKEN_LENGTH);
    // Generate a new token for the device (ensuring no collisions)
    let token = services.users.generate_unique_token().await;

    // Determine if device_id was provided and exists in the db for this user
    let device_exists = if body.device_id.is_some() {

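The access-token change swaps a bare random string for a generate-and-check loop, so a (vanishingly unlikely but possible) collision with an existing token can never hand one device another device's session. A sketch of that retry loop; the generator closure stands in for what the real code presumably does with utils::random_string(TOKEN_LENGTH):

use std::collections::HashSet;

fn generate_unique_token(existing: &HashSet<String>, mut gen: impl FnMut() -> String) -> String {
    loop {
        let candidate = gen();
        if !existing.contains(&candidate) {
            return candidate; // first token not already in use wins
        }
        // On the rare collision, just draw again.
    }
}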
@@ -201,7 +201,7 @@ async fn send_state_event_for_key_helper(
            ..Default::default()
        },
        sender,
        room_id,
        Some(room_id),
        &state_lock,
    )
    .await?;

@@ -60,7 +60,10 @@
use service::rooms::short::{ShortEventId, ShortStateKey};

use super::{load_timeline, share_encrypted_room};
use crate::{Ruma, RumaResponse, client::ignored_filter};
use crate::{
    Ruma, RumaResponse,
    client::{ignored_filter, is_ignored_invite},
};

#[derive(Default)]
struct StateChanges {
@@ -238,6 +241,13 @@ pub(crate) async fn build_sync_events(
        .rooms
        .state_cache
        .rooms_invited(sender_user)
        .wide_filter_map(async |(room_id, invite_state)| {
            if is_ignored_invite(services, sender_user, &room_id).await {
                None
            } else {
                Some((room_id, invite_state))
            }
        })
        .fold_default(|mut invited_rooms: BTreeMap<_, _>, (room_id, invite_state)| async move {
            let invite_count = services
                .rooms
@@ -430,7 +440,7 @@ async fn handle_left_room(
        .ok();

    // Left before last sync
    if Some(since) >= left_count {
    if (Some(since) >= left_count && !include_leave) || Some(next_batch) < left_count {
        return Ok(None);
    }

@@ -457,7 +467,7 @@ async fn handle_left_room(
        state_key: Some(sender_user.as_str().into()),
        unsigned: None,
        // The following keys are dropped on conversion
        room_id: room_id.clone(),
        room_id: Some(room_id.clone()),
        prev_events: vec![],
        depth: uint!(1),
        auth_events: vec![],

@@ -11,6 +11,7 @@
    utils::{
        BoolExt, IterStream, ReadyExt, TryFutureExtExt,
        math::{ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
        stream::WidebandExt,
    },
    warn,
};
@@ -39,12 +40,13 @@
use super::{load_timeline, share_encrypted_room};
use crate::{
    Ruma,
    client::{DEFAULT_BUMP_TYPES, ignored_filter},
    client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite},
};

type TodoRooms = BTreeMap<OwnedRoomId, (BTreeSet<TypeStateKey>, usize, u64)>;
const SINGLE_CONNECTION_SYNC: &str = "single_connection_sync";

#[allow(clippy::cognitive_complexity)]
/// POST `/_matrix/client/unstable/org.matrix.msc3575/sync`
///
/// Sliding Sync endpoint (future endpoint: `/_matrix/client/v4/sync`)
@@ -101,6 +103,13 @@ pub(crate) async fn sync_events_v4_route(
        .rooms
        .state_cache
        .rooms_invited(sender_user)
        .wide_filter_map(async |(room_id, invite_state)| {
            if is_ignored_invite(&services, sender_user, &room_id).await {
                None
            } else {
                Some((room_id, invite_state))
            }
        })
        .map(|r| r.0)
        .collect()
        .await;

@@ -14,6 +14,7 @@
        BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt,
        future::ReadyEqExt,
        math::{ruma_from_usize, usize_from_ruma},
        stream::WidebandExt,
    },
    warn,
};
@@ -38,7 +39,7 @@
use super::share_encrypted_room;
use crate::{
    Ruma,
    client::{DEFAULT_BUMP_TYPES, ignored_filter, sync::load_timeline},
    client::{DEFAULT_BUMP_TYPES, ignored_filter, is_ignored_invite, sync::load_timeline},
};

type SyncInfo<'a> = (&'a UserId, &'a DeviceId, u64, &'a sync_events::v5::Request);
@@ -106,6 +107,13 @@ pub(crate) async fn sync_events_v5_route(
        .rooms
        .state_cache
        .rooms_invited(sender_user)
        .wide_filter_map(async |(room_id, invite_state)| {
            if is_ignored_invite(services, sender_user, &room_id).await {
                None
            } else {
                Some((room_id, invite_state))
            }
        })
        .map(|r| r.0)
        .collect::<Vec<OwnedRoomId>>();

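The same guard is inserted into all three sync implementations (v3, MSC3575 v4, simplified v5): invites from ignored users are filtered out of the invited-rooms stream per user at sync time rather than being rejected at send time, so the inviter still sees a success. A sketch of the stream-level filter over plain iterators:

fn visible_invites<'a>(
    invited: impl Iterator<Item = (&'a str, &'a str)> + 'a, // (room_id, invite_state)
    is_ignored_invite: impl Fn(&str) -> bool + 'a,          // per-room ignore check
) -> impl Iterator<Item = (&'a str, &'a str)> + 'a {
    // Mirrors wide_filter_map: drop the pair when the invite is ignored.
    invited.filter(move |(room_id, _)| !is_ignored_invite(room_id))
}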
@@ -312,6 +320,7 @@ async fn handle_lists<'a, Rooms, AllRooms>(

    for mut range in ranges {
        range.0 = uint!(0);
        range.1 = range.1.checked_add(uint!(1)).unwrap_or(range.1);
        range.1 = range
            .1
            .clamp(range.0, UInt::try_from(active_rooms.len()).unwrap_or(UInt::MAX));

@@ -2,18 +2,14 @@

use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{Err, Error, Result};
use conduwuit::{Err, Result};
use futures::StreamExt;
use ruma::{
    OwnedRoomId,
    api::{
        client::{
            error::ErrorKind,
            membership::mutual_rooms,
            profile::{
                delete_profile_key, delete_timezone_key, get_profile_key, get_timezone_key,
                set_profile_key, set_timezone_key,
            },
            profile::{delete_profile_key, get_profile_key, set_profile_key},
        },
        federation,
    },
@@ -60,62 +56,6 @@ pub(crate) async fn get_mutual_rooms_route(
    })
}

/// # `DELETE /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz`
///
/// Deletes the `tz` (timezone) of a user, as per MSC4133 and MSC4175.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn delete_timezone_key_route(
    State(services): State<crate::State>,
    body: Ruma<delete_timezone_key::unstable::Request>,
) -> Result<delete_timezone_key::unstable::Response> {
    let sender_user = body.sender_user();

    if *sender_user != body.user_id && body.appservice_info.is_none() {
        return Err!(Request(Forbidden("You cannot update the profile of another user")));
    }

    services.users.set_timezone(&body.user_id, None);

    if services.config.allow_local_presence {
        // Presence update
        services
            .presence
            .ping_presence(&body.user_id, &PresenceState::Online)
            .await?;
    }

    Ok(delete_timezone_key::unstable::Response {})
}

/// # `PUT /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz`
///
/// Updates the `tz` (timezone) of a user, as per MSC4133 and MSC4175.
///
/// - Also makes sure other users receive the update using presence EDUs
pub(crate) async fn set_timezone_key_route(
    State(services): State<crate::State>,
    body: Ruma<set_timezone_key::unstable::Request>,
) -> Result<set_timezone_key::unstable::Response> {
    let sender_user = body.sender_user();

    if *sender_user != body.user_id && body.appservice_info.is_none() {
        return Err!(Request(Forbidden("You cannot update the profile of another user")));
    }

    services.users.set_timezone(&body.user_id, body.tz.clone());

    if services.config.allow_local_presence {
        // Presence update
        services
            .presence
            .ping_presence(&body.user_id, &PresenceState::Online)
            .await?;
    }

    Ok(set_timezone_key::unstable::Response {})
}

/// # `PUT /_matrix/client/unstable/uk.tcpip.msc4133/profile/{user_id}/{field}`
///
/// Updates the profile key-value field of a user, as per MSC4133.
@@ -150,19 +90,14 @@ pub(crate) async fn set_profile_key_route(
        )));
    };

    if body
        .kv_pair
        .keys()
        .any(|key| key.starts_with("u.") && !profile_key_value.is_string())
    {
        return Err!(Request(BadJson("u.* profile key fields must be strings")));
    }

    if body.kv_pair.keys().any(|key| key.len() > 128) {
        return Err!(Request(BadJson("Key names cannot be longer than 128 bytes")));
    }

    if body.key_name == "displayname" {
        let Some(display_name) = profile_key_value.as_str() else {
            return Err!(Request(BadJson("displayname must be a string")));
        };
        let all_joined_rooms: Vec<OwnedRoomId> = services
            .rooms
            .state_cache
@@ -174,12 +109,15 @@ pub(crate) async fn set_profile_key_route(
        update_displayname(
            &services,
            &body.user_id,
            Some(profile_key_value.to_string()),
            Some(display_name.to_owned()),
            &all_joined_rooms,
        )
        .await;
    } else if body.key_name == "avatar_url" {
        let mxc = ruma::OwnedMxcUri::from(profile_key_value.to_string());
        let Some(avatar_url) = profile_key_value.as_str() else {
            return Err!(Request(BadJson("avatar_url must be a string")));
        };
        let mxc = ruma::OwnedMxcUri::from(avatar_url);

        let all_joined_rooms: Vec<OwnedRoomId> = services
            .rooms
            .state_cache
@@ -268,70 +206,12 @@ pub(crate) async fn delete_profile_key_route(
    Ok(delete_profile_key::unstable::Response {})
}

/// # `GET /_matrix/client/unstable/uk.tcpip.msc4133/profile/:user_id/us.cloke.msc4175.tz`
///
/// Returns the `timezone` of the user as per MSC4133 and MSC4175.
///
/// - If user is on another server and we do not have a local copy already fetch
///   `timezone` over federation
pub(crate) async fn get_timezone_key_route(
    State(services): State<crate::State>,
    body: Ruma<get_timezone_key::unstable::Request>,
) -> Result<get_timezone_key::unstable::Response> {
    if !services.globals.user_is_local(&body.user_id) {
        // Create and update our local copy of the user
        if let Ok(response) = services
            .sending
            .send_federation_request(
                body.user_id.server_name(),
                federation::query::get_profile_information::v1::Request {
                    user_id: body.user_id.clone(),
                    field: None, // we want the full user's profile to update locally as well
                },
            )
            .await
        {
            if !services.users.exists(&body.user_id).await {
                services.users.create(&body.user_id, None)?;
            }

            services
                .users
                .set_displayname(&body.user_id, response.displayname.clone());

            services
                .users
                .set_avatar_url(&body.user_id, response.avatar_url.clone());

            services
                .users
                .set_blurhash(&body.user_id, response.blurhash.clone());

            services
                .users
                .set_timezone(&body.user_id, response.tz.clone());

            return Ok(get_timezone_key::unstable::Response { tz: response.tz });
        }
    }

    if !services.users.exists(&body.user_id).await {
        // Return 404 if this user doesn't exist and we couldn't fetch it over
        // federation
        return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found."));
    }

    Ok(get_timezone_key::unstable::Response {
        tz: services.users.timezone(&body.user_id).await.ok(),
    })
}

/// # `GET /_matrix/client/unstable/uk.tcpip.msc4133/profile/{userId}/{field}}`
///
/// Gets the profile key-value field of a user, as per MSC4133.
///
/// - If user is on another server and we do not have a local copy already fetch
///   `timezone` over federation
///   the value over federation
pub(crate) async fn get_profile_key_route(
    State(services): State<crate::State>,
    body: Ruma<get_profile_key::unstable::Request>,
@@ -352,7 +232,7 @@ pub(crate) async fn get_profile_key_route(
        .await
    {
        if !services.users.exists(&body.user_id).await {
            services.users.create(&body.user_id, None)?;
            services.users.create(&body.user_id, None, None).await?;
        }

        services
@@ -367,10 +247,6 @@ pub(crate) async fn get_profile_key_route(
            .users
            .set_blurhash(&body.user_id, response.blurhash.clone());

        services
            .users
            .set_timezone(&body.user_id, response.tz.clone());

        match response.custom_profile_fields.get(&body.key_name) {
            | Some(value) => {
                profile_key_value.insert(body.key_name.clone(), value.clone());

@@ -58,6 +58,8 @@ pub(crate) async fn get_supported_versions_route(
        ("uk.tcpip.msc4133".to_owned(), true), /* Extending User Profile API with Key:Value Pairs (https://github.com/matrix-org/matrix-spec-proposals/pull/4133) */
        ("us.cloke.msc4175".to_owned(), true), /* Profile field for user time zone (https://github.com/matrix-org/matrix-spec-proposals/pull/4175) */
        ("org.matrix.simplified_msc3575".to_owned(), true), /* Simplified Sliding sync (https://github.com/matrix-org/matrix-spec-proposals/pull/4186) */
        ("uk.timedout.msc4323".to_owned(), true), /* agnostic suspend (https://github.com/matrix-org/matrix-spec-proposals/pull/4323) */
        ("org.matrix.msc4155".to_owned(), true), /* invite filtering (https://github.com/matrix-org/matrix-spec-proposals/pull/4155) */
    ]),
};

@@ -22,12 +22,9 @@
pub fn build(router: Router<State>, server: &Server) -> Router<State> {
    let config = &server.config;
    let mut router = router
        .ruma_route(&client::get_timezone_key_route)
        .ruma_route(&client::get_profile_key_route)
        .ruma_route(&client::set_profile_key_route)
        .ruma_route(&client::delete_profile_key_route)
        .ruma_route(&client::set_timezone_key_route)
        .ruma_route(&client::delete_timezone_key_route)
        .ruma_route(&client::appservice_ping)
        .ruma_route(&client::get_supported_versions_route)
        .ruma_route(&client::get_register_available_route)
@@ -184,6 +181,8 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
            "/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
            get(client::get_room_summary_legacy)
        )
        .ruma_route(&client::get_suspended_status)
        .ruma_route(&client::put_suspended_status)
        .ruma_route(&client::well_known_support)
        .ruma_route(&client::well_known_client)
        .route("/_conduwuit/server_version", get(client::conduwuit_server_version))

@@ -5,6 +5,14 @@
    typed_header::TypedHeaderRejectionReason,
};
use conduwuit::{Err, Error, Result, debug_error, err, warn};
use futures::{
    TryFutureExt,
    future::{
        Either::{Left, Right},
        select_ok,
    },
    pin_mut,
};
use ruma::{
    CanonicalJsonObject, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, OwnedUserId, UserId,
    api::{
@@ -12,9 +20,7 @@
        client::{
            directory::get_public_rooms,
            error::ErrorKind,
            profile::{
                get_avatar_url, get_display_name, get_profile, get_profile_key, get_timezone_key,
            },
            profile::{get_avatar_url, get_display_name, get_profile, get_profile_key},
            voip::get_turn_server_info,
        },
        federation::{authentication::XMatrix, openid::get_openid_userinfo},
@@ -54,17 +60,7 @@ pub(super) async fn auth(
        | None => request.query.access_token.as_deref(),
    };

    let token = if let Some(token) = token {
        match services.appservice.find_from_token(token).await {
            | Some(reg_info) => Token::Appservice(Box::new(reg_info)),
            | _ => match services.users.find_from_token(token).await {
                | Ok((user_id, device_id)) => Token::User((user_id, device_id)),
                | _ => Token::Invalid,
            },
        }
    } else {
        Token::None
    };
    let token = find_token(services, token).await?;

    if metadata.authentication == AuthScheme::None {
        match metadata {
@@ -91,8 +87,7 @@ pub(super) async fn auth(
            | &get_profile::v3::Request::METADATA
            | &get_profile_key::unstable::Request::METADATA
            | &get_display_name::v3::Request::METADATA
            | &get_avatar_url::v3::Request::METADATA
            | &get_timezone_key::unstable::Request::METADATA => {
            | &get_avatar_url::v3::Request::METADATA => {
                if services.server.config.require_auth_for_profile_requests {
                    match token {
                        | Token::Appservice(_) | Token::User(_) => {
@@ -342,3 +337,25 @@ async fn parse_x_matrix(request: &mut Request) -> Result<XMatrix> {

    Ok(x_matrix)
}

async fn find_token(services: &Services, token: Option<&str>) -> Result<Token> {
    let Some(token) = token else {
        return Ok(Token::None);
    };

    let user_token = services.users.find_from_token(token).map_ok(Token::User);

    let appservice_token = services
        .appservice
        .find_from_token(token)
        .map_ok(Box::new)
        .map_ok(Token::Appservice);

    pin_mut!(user_token, appservice_token);
    // Returns Ok if either token type succeeds, Err only if both fail
    match select_ok([Left(user_token), Right(appservice_token)]).await {
        | Err(e) if !e.is_not_found() => Err(e),
        | Ok((token, _)) => Ok(token),
        | _ => Ok(Token::Invalid),
    }
}

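find_token replaces the old sequential appservice-then-user lookup with two lookups raced concurrently via futures::future::select_ok: the first Ok wins, a non-not-found error is surfaced, and only when both lookups miss is the token declared invalid. A sketch of the same pattern over two generic futures:

use futures::future::{select_ok, Either::{Left, Right}};
use futures::pin_mut;

async fn first_ok<T>(
    a: impl std::future::Future<Output = Result<T, String>>,
    b: impl std::future::Future<Output = Result<T, String>>,
) -> Result<T, String> {
    pin_mut!(a, b);
    // select_ok resolves with the first future to return Ok,
    // and only errors once every racing future has errored.
    select_ok([Left(a), Right(b)]).await.map(|(value, _rest)| value)
}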
@@ -34,6 +34,19 @@ pub(super) async fn from(

    let max_body_size = services.server.config.max_request_size;

    // Check if the Content-Length header is present and valid, saves us streaming
    // the response into memory
    if let Some(content_length) = parts.headers.get(http::header::CONTENT_LENGTH) {
        if let Ok(content_length) = content_length
            .to_str()
            .map(|s| s.parse::<usize>().unwrap_or_default())
        {
            if content_length > max_body_size {
                return Err(err!(Request(TooLarge("Request body too large"))));
            }
        }
    }

    let body = axum::body::to_bytes(body, max_body_size)
        .await
        .map_err(|e| err!(Request(TooLarge("Request body too large: {e}"))))?;

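The added fast path rejects oversized uploads from the declared Content-Length alone, before any of the body is read; the axum::body::to_bytes limit below still catches requests that under-declare their size. A sketch of the header check against http's header map:

use http::{HeaderMap, header::CONTENT_LENGTH};

fn declared_too_large(headers: &HeaderMap, max_body_size: usize) -> bool {
    headers
        .get(CONTENT_LENGTH)
        .and_then(|v| v.to_str().ok())         // non-ASCII header value: ignore
        .and_then(|s| s.parse::<usize>().ok()) // non-numeric value: ignore
        .is_some_and(|len| len > max_body_size)
}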
Some files were not shown because too many files have changed in this diff.