Compare commits


32 Commits

Author SHA1 Message Date
Jade Ellis
b6c5991e1f chore(deps): Update rand
A couple indirect deps are still on rand_core 0.6 but we can deal
2026-02-20 22:57:45 +00:00
Katie Kloss
efd879fcd8 docs: Add news fragment 2026-02-20 10:13:54 +00:00
Katie Kloss
92a848f74d fix: Crash before starting on OpenBSD
core_affinity doesn't return any cores on OpenBSD, so we try to
clamp(1, 0). This is Less Good than fixing that crate, but at
least allows the server to start up.
2026-02-20 10:13:54 +00:00
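For context, a minimal sketch of the failure mode described in this commit (variable names are illustrative, not from the codebase): Rust's `Ord::clamp` panics when `min > max`, so clamping a worker count into `1..=0` aborts before the server starts. The actual guard lands in the `num_senders` diff further down.

```rust
fn main() {
    // Illustrative values, not from the codebase: pretend core_affinity
    // reported zero usable cores, as it does on OpenBSD.
    let detected_cores: usize = 0;

    // Ord::clamp asserts min <= max, so this line would panic at startup:
    // let senders = 4usize.clamp(1, detected_cores);

    // The guarded form (mirroring the num_senders fix further down) only
    // applies the upper bound when a core count was actually detected.
    let mut senders = 4usize;
    if detected_cores > 0 {
        senders = senders.min(detected_cores);
    }
    assert_eq!(senders, 4);
}
```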
Renovate Bot
776b5865ba chore(deps): update sentry-rust monorepo to 0.46.0 2026-02-19 14:56:25 +00:00
timedout
722bacbe89 chore: Fix busted lockfile merge 2026-02-19 02:33:41 +00:00
Jade Ellis
46907e3dce chore: Migrate to axum 0.8
Co-authored-by: dasha_uwu
2026-02-19 02:18:29 +00:00
timedout
31e2195e56 fix: Remove non-compliant and non-functional non-authoritative directory queries
chore: Add news frag
2026-02-19 01:37:42 +00:00
Terry
7ecac93ddc fix: Remove rocksdb secondary mode 2026-02-18 23:11:53 +00:00
Terry
6a0b103722 docs: Changelog 2026-02-18 23:11:53 +00:00
Terry
23d77b614f fix: Remove ability to set rocksdb as read only 2026-02-18 23:11:53 +00:00
stratself
e01aa44b16 fix: add nodejs URL in CONTRIBUTING.md page 2026-02-18 23:07:29 +00:00
stratself
a08739c246 docs: rewrite how to load docs with new rspress engine 2026-02-18 23:07:29 +00:00
Ginger
c14864b881 fix: Wording fixes 2026-02-18 14:41:03 +00:00
Ginger
1773e72e68 feat(docs): Add a note about !779 to the troubleshooting page 2026-02-18 14:41:03 +00:00
kraem
0f94d55689 fix: don't warn about needed backfill via federation for non-federated rooms 2026-02-18 14:27:14 +00:00
Renovate Bot
abfb6377c2 chore(deps): update rust-patch-updates 2026-02-18 14:26:49 +00:00
Renovate Bot
91d64f5b24 chore(deps): update rust crate askama to 0.15.0 2026-02-18 05:04:23 +00:00
Jade Ellis
9a3f3f6e78 ci: Explicitly enable Dependency Dashboard 2026-02-17 21:33:30 +00:00
Jade Ellis
b3e31a4aad ci(deps): Automerge typos updates 2026-02-17 21:33:13 +00:00
Jade Ellis
8cda431cc6 ci(deps): Group npm patch updates 2026-02-17 21:30:51 +00:00
Renovate Bot
02b9a3f713 chore(deps): update pre-commit hook crate-ci/typos to v1.43.5 2026-02-17 05:03:45 +00:00
timedout
d40893730c chore: Lighten the phrasing 2026-02-17 02:07:19 +00:00
timedout
28fae58cf6 chore: Add news frag & rebuild config 2026-02-17 02:07:19 +00:00
timedout
f458f6ab76 chore: Disable presence by default, and add warnings to other heavy ops 2026-02-17 02:07:19 +00:00
Shane Jaroch
fdf9cea533 fix(admin-cli): concatenation/formatting error, i.e.,
**NOTE:** If there are any features, tools, or admin internals dependent on this output that would break, let me know!
I'm hoping this is acceptable, since it's a human-readable command.

Current output:

```shell
uwu> server list-backups
    #1 Mon, 9 Feb 2026 20:36:25 +0000: 66135580 bytes, 595 files#2 Wed, 11 Feb 2026 02:33:15 +0000: 270963746 bytes, 1002 files#3 Sat, 14 Feb 2026 22:11:19 +0000: 675905487 bytes, 2139 files
```

Should be:

```shell
uwu> server list-backups
    #1 Mon, 9 Feb 2026 20:36:25 +0000: 66135580 bytes, 595 files
    #2 Wed, 11 Feb 2026 02:33:15 +0000: 270963746 bytes, 1002 files
    #3 Sat, 14 Feb 2026 22:11:19 +0000: 675905487 bytes, 2139 files
```
2026-02-16 00:52:02 -05:00
Jade Ellis
ecb1b73c84 style: Trailing whitespace 2026-02-16 03:47:16 +00:00
rooot
e03082480a docs(livekit): document nginx websockets too
Signed-off-by: rooot <hey@rooot.gay>
2026-02-16 03:43:43 +00:00
rooot
f9e7f019ad docs(livekit): fix port in caddy config example
Signed-off-by: rooot <hey@rooot.gay>
2026-02-16 03:43:43 +00:00
rooot
12069e7c86 docs(livekit): add nginx proxy example
Signed-off-by: rooot <hey@rooot.gay>
2026-02-16 03:43:42 +00:00
Jade Ellis
77928a62b4 docs: Document BSD community room 2026-02-16 03:31:56 +00:00
elisaado
c73cb5c1bf feat(docs): Add Kubernetes documentation with sample (#1387)
Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/1387
Reviewed-by: Jade Ellis <jade@ellis.link>
Co-authored-by: elisaado <forgejoellis@elisaado.com>
Co-committed-by: elisaado <forgejoellis@elisaado.com>
2026-02-16 03:14:29 +00:00
Jade Ellis
a140eacb04 docs: Fix trailing list 2026-02-16 03:12:50 +00:00
44 changed files with 825 additions and 640 deletions

View File

@@ -23,7 +23,7 @@ repos:
- id: check-added-large-files
- repo: https://github.com/crate-ci/typos
rev: v1.43.4
rev: v1.43.5
hooks:
- id: typos

View File

@@ -85,24 +85,31 @@ ### Matrix tests
### Writing documentation
Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages
Continuwuity's website uses [`rspress`][rspress] and is deployed via CI using Cloudflare Pages
in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/`
directory at the top level.
To build the documentation locally:
To load the documentation locally:
1. Install NodeJS and npm from their [official website][nodejs-download] or via your package manager of choice
2. From the project's root directory, install the relevant npm modules
1. Install mdbook if you don't have it already:
```bash
cargo install mdbook # or cargo binstall, or another method
npm ci
```
2. Build the documentation:
3. Make changes to the document pages as you see fit
4. Generate a live preview of the documentation
```bash
mdbook build
npm run docs:dev
```
The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server.
A webserver for the docs will be spun up for you (e.g. at `http://localhost:3000`). Any changes you make to the documentation will be live-reloaded on the webpage.
Alternatively, you can build the documentation using `npm run docs:build` - the output of this will be in the `/doc_build` directory. Once you're happy with your documentation updates, you can commit the changes.
### Commit Messages
@@ -169,5 +176,6 @@ ### Creating pull requests
[continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org
[complement]: https://github.com/matrix-org/complement/
[sytest]: https://github.com/matrix-org/sytest/
[mdbook]: https://rust-lang.github.io/mdBook/
[nodejs-download]: https://nodejs.org/en/download
[rspress]: https://rspress.rs/
[documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml

Cargo.lock (generated, 761 changed lines)

File diff suppressed because it is too large.

View File

@@ -68,7 +68,7 @@ default-features = false
version = "0.1.3"
[workspace.dependencies.rand]
version = "0.8.5"
version = "0.10.0"
# Used for the http request / response body type for Ruma endpoints used with reqwest
[workspace.dependencies.bytes]
@@ -84,7 +84,7 @@ version = "1.3.1"
version = "1.11.1"
[workspace.dependencies.axum]
version = "0.7.9"
version = "0.8.8"
default-features = false
features = [
"form",
@@ -97,7 +97,7 @@ features = [
]
[workspace.dependencies.axum-extra]
version = "0.9.6"
version = "0.10.1"
default-features = false
features = ["typed-header", "tracing"]
@@ -110,7 +110,7 @@ default-features = false
version = "0.7"
[workspace.dependencies.axum-client-ip]
version = "0.6.1"
version = "0.7"
[workspace.dependencies.tower]
version = "0.5.2"
@@ -118,7 +118,7 @@ default-features = false
features = ["util"]
[workspace.dependencies.tower-http]
version = "0.6.2"
version = "0.6.8"
default-features = false
features = [
"add-extension",
@@ -298,7 +298,7 @@ default-features = false
features = ["env", "toml"]
[workspace.dependencies.hickory-resolver]
version = "0.25.1"
version = "0.25.2"
default-features = false
features = [
"serde",
@@ -342,7 +342,8 @@ version = "0.1.2"
# Used for matrix spec type definitions and helpers
[workspace.dependencies.ruma]
git = "https://forgejo.ellis.link/continuwuation/ruwuma"
rev = "b496b7f38d517149361a882e75d3fd4faf210441"
#branch = "conduwuit-changes"
rev = "e087ff15888156942ca2ffe6097d1b4c3fd27628"
features = [
"compat",
"rand",
@@ -424,7 +425,7 @@ features = ["http", "grpc-tonic", "trace", "logs", "metrics"]
# optional sentry metrics for crash/panic reporting
[workspace.dependencies.sentry]
version = "0.45.0"
version = "0.46.0"
default-features = false
features = [
"backtrace",
@@ -440,9 +441,9 @@ features = [
]
[workspace.dependencies.sentry-tracing]
version = "0.45.0"
version = "0.46.0"
[workspace.dependencies.sentry-tower]
version = "0.45.0"
version = "0.46.0"
# jemalloc usage
[workspace.dependencies.tikv-jemalloc-sys]
@@ -553,7 +554,7 @@ version = "0.7.5"
version = "1.0.1"
[workspace.dependencies.askama]
version = "0.14.0"
version = "0.15.0"
#
# Patches

changelog.d/1393.bugfix (new file, 1 line)
View File

@@ -0,0 +1 @@
Removed non-compliant and non-functional room alias lookups over federation. Contributed by @nex

changelog.d/1399.feature (new file, 1 line)
View File

@@ -0,0 +1 @@
Outgoing presence is now disabled by default, and the config option documentation has been adjusted to more accurately represent the weight of presence, typing indicators, and read receipts. Contributed by @nex.

changelog.d/1418.bugfix (new file, 1 line)
View File

@@ -0,0 +1 @@
Removed the ability to set rocksdb as read-only. Doing so would cause unintended and buggy behaviour. Contributed by @Terryiscool160.

changelog.d/1421.bugfix (new file, 1 line)
View File

@@ -0,0 +1 @@
Fixed a startup crash in the sender service if we can't detect the number of CPU cores, even if the `sender_workers` config option is set correctly. Contributed by @katie.

View File

@@ -0,0 +1 @@
Updated `list-backups` admin command to output one backup per line.

View File

@@ -1056,14 +1056,6 @@
#
#rocksdb_repair = false
# This item is undocumented. Please contribute documentation for it.
#
#rocksdb_read_only = false
# This item is undocumented. Please contribute documentation for it.
#
#rocksdb_secondary = false
# Enables idle CPU priority for compaction thread. This is not enabled by
# default to prevent compaction from falling too far behind on busy
# systems.
@@ -1120,27 +1112,34 @@
# Allow local (your server only) presence updates/requests.
#
# Note that presence on continuwuity is very fast unlike Synapse's. If
# using outgoing presence, this MUST be enabled.
# Local presence must be enabled for outgoing presence to function.
#
# Note that local presence is not as heavy on the CPU as federated
# presence, but will still become more expensive the more local users you
# have.
#
#allow_local_presence = true
# Allow incoming federated presence updates/requests.
# Allow incoming federated presence updates.
#
# This option receives presence updates from other servers, but does not
# send any unless `allow_outgoing_presence` is true. Note that presence on
# continuwuity is very fast unlike Synapse's.
# This option enables processing inbound presence updates from other
# servers. Without it, remote users will appear as if they are always
# offline to your local users. This does not affect typing indicators or
# read receipts.
#
#allow_incoming_presence = true
# Allow outgoing presence updates/requests.
#
# This option sends presence updates to other servers, but does not
# receive any unless `allow_incoming_presence` is true. Note that presence
# on continuwuity is very fast unlike Synapse's. If using outgoing
# presence, you MUST enable `allow_local_presence` as well.
# This option sends presence updates to other servers, and requires that
# `allow_local_presence` is also enabled.
#
#allow_outgoing_presence = true
# Note that outgoing presence is very heavy on the CPU and network, and
# will typically cause extreme strain and slowdowns for no real benefit.
# There are only a few clients that even implement presence, so you
# probably don't want to enable this.
#
#allow_outgoing_presence = false
# How many seconds without presence updates before you become idle.
# Defaults to 5 minutes.
@@ -1174,6 +1173,10 @@
# Allow sending read receipts to remote servers.
#
# Note that sending read receipts to remote servers in large rooms with
# lots of other homeservers may cause additional strain on the CPU and
# network.
#
#allow_outgoing_read_receipts = true
# Allow local typing updates.
@@ -1185,6 +1188,10 @@
# Allow outgoing typing updates to federation.
#
# Note that sending typing indicators to remote servers in large rooms
# with lots of other homeservers may cause additional strain on the CPU
# and network.
#
#allow_outgoing_typing = true
# Allow incoming typing updates from federation.

View File

@@ -137,7 +137,7 @@ ### 4. Configure your Reverse Proxy
# for lk-jwt-service
@lk-jwt-service path /sfu/get* /healthz* /get_token*
route @lk-jwt-service {
reverse_proxy 127.0.0.1:8080
reverse_proxy 127.0.0.1:8081
}
# for livekit
@@ -146,6 +146,46 @@ ### 4. Configure your Reverse Proxy
```
</details>
<details>
<summary>Example nginx config</summary>
```
server {
server_name matrix-rtc.example.com;
# for lk-jwt-service
location ~ ^/(sfu/get|healthz|get_token) {
proxy_pass http://127.0.0.1:8081$request_uri;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
}
# for livekit
location / {
proxy_pass http://127.0.0.1:7880$request_uri;
proxy_set_header X-Forwarded-For $remote_addr;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Host $http_host;
proxy_buffering off;
# websocket
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
}
}
```
Note that for websockets to work, you need to have this somewhere outside your server block:
```
map $http_upgrade $connection_upgrade {
default upgrade;
'' close;
}
```
</details>
<details>
<summary>Example traefik router</summary>
```
@@ -226,4 +266,3 @@ ### Related Documentation
- [Synapse documentation](https://github.com/element-hq/element-call/blob/livekit/docs/self-hosting.md)
- [Community guide](https://tomfos.tr/matrix/livekit/)
- [Community guide](https://blog.kimiblock.top/2024/12/24/hosting-element-call/)
-

View File

@@ -3,3 +3,5 @@ # Continuwuity for FreeBSD
Continuwuity currently does not provide FreeBSD builds or FreeBSD packaging. However, Continuwuity does build and work on FreeBSD using the system-provided RocksDB.
Contributions to get Continuwuity packaged for FreeBSD are welcome.
Please join our [Continuwuity BSD](https://matrix.to/#/%23bsd:continuwuity.org) community room.

View File

@@ -1,7 +1,109 @@
# Continuwuity for Kubernetes
Continuwuity doesn't support horizontal scalability or distributed loading
natively. However, [a community-maintained Helm Chart is available here to run
natively. However, a deployment in Kubernetes is very similar to the Docker
setup. This is because Continuwuity can be fully configured using environment
variables. A sample StatefulSet is shared below. The only things missing are
a PVC definition (named `continuwuity-data`) for the volume mounted to
the StatefulSet, an Ingress resource to point your webserver to the
Continuwuity Pods, and a Service resource (targeting `app.kubernetes.io/name: continuwuity`)
to glue the Ingress and Pods together.
Carefully go through the `env` section and add, change, or remove env vars as needed using the [Configuration reference](https://continuwuity.org/reference/config.html).
```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: continuwuity
namespace: matrix
labels:
app.kubernetes.io/name: continuwuity
spec:
replicas: 1
serviceName: continuwuity
podManagementPolicy: Parallel
selector:
matchLabels:
app.kubernetes.io/name: continuwuity
template:
metadata:
labels:
app.kubernetes.io/name: continuwuity
spec:
securityContext:
sysctls:
- name: net.ipv4.ip_unprivileged_port_start
value: "0"
containers:
- name: continuwuity
# use a sha hash <3
image: forgejo.ellis.link/continuwuation/continuwuity:latest
imagePullPolicy: IfNotPresent
ports:
- name: http
containerPort: 80
volumeMounts:
- mountPath: /data
name: data
subPath: data
securityContext:
capabilities:
add:
- NET_BIND_SERVICE
env:
- name: TOKIO_WORKER_THREADS
value: "2"
- name: CONTINUWUITY_SERVER_NAME
value: "example.com"
- name: CONTINUWUITY_DATABASE_PATH
value: "/data/db"
- name: CONTINUWUITY_DATABASE_BACKEND
value: "rocksdb"
- name: CONTINUWUITY_PORT
value: "80"
- name: CONTINUWUITY_MAX_REQUEST_SIZE
value: "20000000"
- name: CONTINUWUITY_ALLOW_FEDERATION
value: "true"
- name: CONTINUWUITY_TRUSTED_SERVERS
value: '["matrix.org"]'
- name: CONTINUWUITY_ADDRESS
value: "0.0.0.0"
- name: CONTINUWUITY_ROCKSDB_PARALLELISM_THREADS
value: "1"
- name: CONTINUWUITY_WELL_KNOWN__SERVER
value: "matrix.example.com:443"
- name: CONTINUWUITY_WELL_KNOWN__CLIENT
value: "https://matrix.example.com"
- name: CONTINUWUITY_ALLOW_REGISTRATION
value: "false"
- name: RUST_LOG
value: info
readinessProbe:
httpGet:
path: /_matrix/federation/v1/version
port: http
periodSeconds: 4
failureThreshold: 5
resources:
# Continuwuity might use quite some RAM :3
requests:
cpu: "2"
memory: "512Mi"
limits:
cpu: "4"
memory: "2048Mi"
volumes:
- name: data
persistentVolumeClaim:
claimName: continuwuity-data
```
---
Apart from manually configuring the containers,
[a community-maintained Helm Chart is available here to run
conduwuit on Kubernetes](https://gitlab.cronce.io/charts/conduwuit).
This should be compatible with Continuwuity, but you will need to change the image reference.

View File

@@ -1,13 +1,28 @@
# Troubleshooting Continuwuity
> **Docker users ⚠️**
>
> Docker can be difficult to use and debug. It's common for Docker
> misconfigurations to cause issues, particularly with networking and permissions.
> Please check that your issues are not due to problems with your Docker setup.
:::warning{title="Docker users:"}
Docker can be difficult to use and debug. It's common for Docker
misconfigurations to cause issues, particularly with networking and permissions.
Please check that your issues are not due to problems with your Docker setup.
:::
## Continuwuity and Matrix issues
### Slow joins to rooms
Some slowness is to be expected if you're the first person on your homeserver to join a room (which will
always be the case for single-user homeservers). In this situation, your homeserver has to verify the signatures of
all of the state events sent by other servers before your join. To make this process as fast as possible, make sure you have
multiple fast, trusted servers listed in `trusted_servers` in your configuration, and ensure
`query_trusted_key_servers_first_on_join` is set to true (the default).
If you need suggestions for trusted servers, ask in the Continuwuity main room.
However, _very_ slow joins, especially to rooms with only a few users in them or rooms created by another user
on your homeserver, may be caused by [issue !779](https://forgejo.ellis.link/continuwuation/continuwuity/issues/779),
which is a longstanding bug with synchronizing room joins to clients. In this situation, you did succeed in joining the room, but
the bug caused your homeserver to forget to tell your client. **To fix this, clear your client's cache.** Both Element and Cinny
have a button to clear their cache in the "About" section of their settings.
### Lost access to admin room
You can reinvite yourself to the admin room through the following methods:

View File

@@ -1,6 +1,7 @@
{
"$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": ["config:recommended", "replacements:all"],
"dependencyDashboard": true,
"osvVulnerabilityAlerts": true,
"lockFileMaintenance": {
"enabled": true,
@@ -57,12 +58,25 @@
"matchUpdateTypes": ["minor", "patch"],
"groupName": "github-actions-non-major"
},
{
"description": "Batch patch-level Node.js dependency updates",
"matchManagers": ["npm"],
"matchUpdateTypes": ["patch"],
"groupName": "node-patch-updates"
},
{
"description": "Pin forgejo artifact actions to prevent breaking changes",
"matchManagers": ["github-actions"],
"matchPackageNames": ["forgejo/upload-artifact", "forgejo/download-artifact"],
"enabled": false
},
{
"description": "Auto-merge crate-ci/typos minor updates",
"matchPackageNames": ["crate-ci/typos"],
"matchUpdateTypes": ["minor", "patch"],
"automerge": true,
"automergeStrategy": "fast-forward"
},
{
"description": "Auto-merge renovatebot docker image updates",
"matchDatasources": ["docker"],

View File

@@ -89,13 +89,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result {
locally, if not using get_alias_helper to fetch room ID remotely"
);
match self
.services
.rooms
.alias
.resolve_alias(room_alias, None)
.await
{
match self.services.rooms.alias.resolve_alias(room_alias).await {
| Ok((room_id, servers)) => {
debug!(
%room_id,
@@ -235,7 +229,7 @@ async fn ban_list_of_rooms(&self) -> Result {
.services
.rooms
.alias
.resolve_alias(room_alias, None)
.resolve_alias(room_alias)
.await
{
| Ok((room_id, servers)) => {
@@ -388,13 +382,7 @@ async fn unban_room(&self, room: OwnedRoomOrAliasId) -> Result {
room ID over federation"
);
match self
.services
.rooms
.alias
.resolve_alias(room_alias, None)
.await
{
match self.services.rooms.alias.resolve_alias(room_alias).await {
| Ok((room_id, servers)) => {
debug!(
%room_id,

View File

@@ -86,7 +86,7 @@ pub(super) async fn list_backups(&self) -> Result {
.db
.backup_list()?
.try_stream()
.try_for_each(|result| write!(self, "{result}"))
.try_for_each(|result| writeln!(self, "{result}"))
.await
}

View File

@@ -1,12 +1,6 @@
use axum::extract::State;
use conduwuit::{Err, Result, debug};
use conduwuit_service::Services;
use futures::StreamExt;
use rand::seq::SliceRandom;
use ruma::{
OwnedServerName, RoomAliasId, RoomId,
api::client::alias::{create_alias, delete_alias, get_alias},
};
use conduwuit::{Err, Result};
use ruma::api::client::alias::{create_alias, delete_alias, get_alias};
use crate::Ruma;
@@ -96,65 +90,9 @@ pub(crate) async fn get_alias_route(
) -> Result<get_alias::v3::Response> {
let room_alias = body.body.room_alias;
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias, None).await
else {
let Ok((room_id, servers)) = services.rooms.alias.resolve_alias(&room_alias).await else {
return Err!(Request(NotFound("Room with alias not found.")));
};
let servers = room_available_servers(&services, &room_id, &room_alias, servers).await;
debug!(%room_alias, %room_id, "available servers: {servers:?}");
Ok(get_alias::v3::Response::new(room_id, servers))
}
async fn room_available_servers(
services: &Services,
room_id: &RoomId,
room_alias: &RoomAliasId,
pre_servers: Vec<OwnedServerName>,
) -> Vec<OwnedServerName> {
// find active servers in room state cache to suggest
let mut servers: Vec<OwnedServerName> = services
.rooms
.state_cache
.room_servers(room_id)
.map(ToOwned::to_owned)
.collect()
.await;
// push any servers we want in the list already (e.g. responded remote alias
// servers, room alias server itself)
servers.extend(pre_servers);
servers.sort_unstable();
servers.dedup();
// shuffle list of servers randomly after sort and dedupe
servers.shuffle(&mut rand::thread_rng());
// insert our server as the very first choice if in list, else check if we can
// prefer the room alias server first
match servers
.iter()
.position(|server_name| services.globals.server_is_ours(server_name))
{
| Some(server_index) => {
servers.swap_remove(server_index);
servers.insert(0, services.globals.server_name().to_owned());
},
| _ => {
match servers
.iter()
.position(|server| server == room_alias.server_name())
{
| Some(alias_server_index) => {
servers.swap_remove(alias_server_index);
servers.insert(0, room_alias.server_name().into());
},
| _ => {},
}
},
}
servers
}

View File

@@ -198,11 +198,7 @@ pub(crate) async fn join_room_by_id_or_alias_route(
(servers, room_id)
},
| Err(room_alias) => {
let (room_id, mut servers) = services
.rooms
.alias
.resolve_alias(&room_alias, Some(body.via.clone()))
.await?;
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
banned_room_check(
&services,

View File

@@ -102,11 +102,7 @@ pub(crate) async fn knock_room_route(
(servers, room_id)
},
| Err(room_alias) => {
let (room_id, mut servers) = services
.rooms
.alias
.resolve_alias(&room_alias, Some(body.via.clone()))
.await?;
let (room_id, mut servers) = services.rooms.alias.resolve_alias(&room_alias).await?;
banned_room_check(
&services,

View File

@@ -244,7 +244,7 @@ fn build_report(report: Report) -> RoomMessageEventContent {
/// random delay sending a response per spec suggestion regarding
/// enumerating for potential events existing in our server.
async fn delay_response() {
let time_to_wait = rand::thread_rng().gen_range(2..5);
let time_to_wait = rand::random_range(2..5);
debug_info!(
"Got successful /report request, waiting {time_to_wait} seconds before sending \
successful response."

View File

@@ -342,10 +342,10 @@ async fn allowed_to_send_state_event(
}
for alias in aliases {
let (alias_room_id, _servers) = services
let (alias_room_id, _) = services
.rooms
.alias
.resolve_alias(&alias, None)
.resolve_alias(&alias)
.await
.map_err(|e| {
err!(Request(Unknown("Failed resolving alias \"{alias}\": {e}")))

View File

@@ -122,23 +122,23 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
// Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
// share one Ruma request / response type pair with {get,send}_state_event_for_key_route
.route(
"/_matrix/client/r0/rooms/:room_id/state/:event_type",
"/_matrix/client/r0/rooms/{room_id}/state/{event_type}",
get(client::get_state_events_for_empty_key_route)
.put(client::send_state_event_for_empty_key_route),
)
.route(
"/_matrix/client/v3/rooms/:room_id/state/:event_type",
"/_matrix/client/v3/rooms/{room_id}/state/{event_type}",
get(client::get_state_events_for_empty_key_route)
.put(client::send_state_event_for_empty_key_route),
)
// These two endpoints allow trailing slashes
.route(
"/_matrix/client/r0/rooms/:room_id/state/:event_type/",
"/_matrix/client/r0/rooms/{room_id}/state/{event_type}/",
get(client::get_state_events_for_empty_key_route)
.put(client::send_state_event_for_empty_key_route),
)
.route(
"/_matrix/client/v3/rooms/:room_id/state/:event_type/",
"/_matrix/client/v3/rooms/{room_id}/state/{event_type}/",
get(client::get_state_events_for_empty_key_route)
.put(client::send_state_event_for_empty_key_route),
)
@@ -177,7 +177,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&client::get_mutual_rooms_route)
.ruma_route(&client::get_room_summary)
.route(
"/_matrix/client/unstable/im.nheko.summary/rooms/:room_id_or_alias/summary",
"/_matrix/client/unstable/im.nheko.summary/rooms/{room_id_or_alias}/summary",
get(client::get_room_summary_legacy)
)
.ruma_route(&client::get_suspended_status)
@@ -196,7 +196,7 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.ruma_route(&server::get_server_version_route)
.route("/_matrix/key/v2/server", get(server::get_server_keys_route))
.route(
"/_matrix/key/v2/server/:key_id",
"/_matrix/key/v2/server/{key_id}",
get(server::get_server_keys_deprecated_route),
)
.ruma_route(&server::get_public_rooms_route)
@@ -232,9 +232,9 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
.route("/_continuwuity/local_user_count", get(client::conduwuit_local_user_count));
} else {
router = router
.route("/_matrix/federation/*path", any(federation_disabled))
.route("/_matrix/federation/{*path}", any(federation_disabled))
.route("/.well-known/matrix/server", any(federation_disabled))
.route("/_matrix/key/*path", any(federation_disabled))
.route("/_matrix/key/{*path}", any(federation_disabled))
.route("/_conduwuit/local_user_count", any(federation_disabled))
.route("/_continuwuity/local_user_count", any(federation_disabled));
}
@@ -253,27 +253,27 @@ pub fn build(router: Router<State>, server: &Server) -> Router<State> {
get(client::get_media_preview_legacy_legacy_route),
)
.route(
"/_matrix/media/v1/download/:server_name/:media_id",
"/_matrix/media/v1/download/{server_name}/{media_id}",
get(client::get_content_legacy_legacy_route),
)
.route(
"/_matrix/media/v1/download/:server_name/:media_id/:file_name",
"/_matrix/media/v1/download/{server_name}/{media_id}/{file_name}",
get(client::get_content_as_filename_legacy_legacy_route),
)
.route(
"/_matrix/media/v1/thumbnail/:server_name/:media_id",
"/_matrix/media/v1/thumbnail/{server_name}/{media_id}",
get(client::get_content_thumbnail_legacy_legacy_route),
);
} else {
router = router
.route("/_matrix/media/v1/*path", any(legacy_media_disabled))
.route("/_matrix/media/v1/{*path}", any(legacy_media_disabled))
.route("/_matrix/media/v3/config", any(legacy_media_disabled))
.route("/_matrix/media/v3/download/*path", any(legacy_media_disabled))
.route("/_matrix/media/v3/thumbnail/*path", any(legacy_media_disabled))
.route("/_matrix/media/v3/download/{*path}", any(legacy_media_disabled))
.route("/_matrix/media/v3/thumbnail/{*path}", any(legacy_media_disabled))
.route("/_matrix/media/v3/preview_url", any(redirect_legacy_preview))
.route("/_matrix/media/r0/config", any(legacy_media_disabled))
.route("/_matrix/media/r0/download/*path", any(legacy_media_disabled))
.route("/_matrix/media/r0/thumbnail/*path", any(legacy_media_disabled))
.route("/_matrix/media/r0/download/{*path}", any(legacy_media_disabled))
.route("/_matrix/media/r0/thumbnail/{*path}", any(legacy_media_disabled))
.route("/_matrix/media/r0/preview_url", any(redirect_legacy_preview));
}
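For readers tracking the route changes above, a short sketch of the axum 0.8 path syntax (the `handler` here is a placeholder, not a real route handler from this codebase): axum 0.8 replaced `:param` captures with `{param}` and `*wildcard` with `{*wildcard}`, which is all these `.route(...)` edits do.

```rust
use axum::{Router, routing::get};

// Placeholder handler for illustration only.
async fn handler() -> &'static str { "ok" }

fn routes() -> Router {
    Router::new()
        // axum <= 0.7 wrote these as "/:room_id/state/:event_type" and "/*path"
        .route("/_matrix/client/v3/rooms/{room_id}/state/{event_type}", get(handler))
        .route("/_matrix/media/v1/{*path}", get(handler))
}

fn main() { let _ = routes(); }
```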

View File

@@ -1,6 +1,5 @@
use std::{mem, ops::Deref};
use async_trait::async_trait;
use axum::{body::Body, extract::FromRequest};
use bytes::{BufMut, Bytes, BytesMut};
use conduwuit::{Error, Result, debug, debug_warn, err, trace, utils::string::EMPTY};
@@ -79,7 +78,6 @@ impl<T> Deref for Args<T>
fn deref(&self) -> &Self::Target { &self.body }
}
#[async_trait]
impl<T> FromRequest<State, Body> for Args<T>
where
T: IncomingRequest + Send + Sync + 'static,

View File

@@ -54,7 +54,8 @@ pub(super) async fn auth(
json_body: Option<&CanonicalJsonValue>,
metadata: &Metadata,
) -> Result<Auth> {
let bearer: Option<TypedHeader<Authorization<Bearer>>> = request.parts.extract().await?;
let bearer: Option<TypedHeader<Authorization<Bearer>>> =
request.parts.extract().await.unwrap_or(None);
let token = match &bearer {
| Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
| None => request.query.access_token.as_deref(),

View File

@@ -40,7 +40,7 @@ pub(crate) async fn get_room_information_route(
servers.sort_unstable();
servers.dedup();
servers.shuffle(&mut rand::thread_rng());
servers.shuffle(&mut rand::rng());
// insert our server as the very first choice if in list
if let Some(server_index) = servers

View File

@@ -86,6 +86,7 @@ libloading.optional = true
log.workspace = true
num-traits.workspace = true
rand.workspace = true
rand_core = { version = "0.6.4", features = ["getrandom"] }
regex.workspace = true
reqwest.workspace = true
ring.workspace = true

View File

@@ -1244,12 +1244,6 @@ pub struct Config {
#[serde(default)]
pub rocksdb_repair: bool,
#[serde(default)]
pub rocksdb_read_only: bool,
#[serde(default)]
pub rocksdb_secondary: bool,
/// Enables idle CPU priority for compaction thread. This is not enabled by
/// default to prevent compaction from falling too far behind on busy
/// systems.
@@ -1309,26 +1303,33 @@ pub struct Config {
/// Allow local (your server only) presence updates/requests.
///
/// Note that presence on continuwuity is very fast unlike Synapse's. If
/// using outgoing presence, this MUST be enabled.
/// Local presence must be enabled for outgoing presence to function.
///
/// Note that local presence is not as heavy on the CPU as federated
/// presence, but will still become more expensive the more local users you
/// have.
#[serde(default = "true_fn")]
pub allow_local_presence: bool,
/// Allow incoming federated presence updates/requests.
/// Allow incoming federated presence updates.
///
/// This option receives presence updates from other servers, but does not
/// send any unless `allow_outgoing_presence` is true. Note that presence on
/// continuwuity is very fast unlike Synapse's.
/// This option enables processing inbound presence updates from other
/// servers. Without it, remote users will appear as if they are always
/// offline to your local users. This does not affect typing indicators or
/// read receipts.
#[serde(default = "true_fn")]
pub allow_incoming_presence: bool,
/// Allow outgoing presence updates/requests.
///
/// This option sends presence updates to other servers, but does not
/// receive any unless `allow_incoming_presence` is true. Note that presence
/// on continuwuity is very fast unlike Synapse's. If using outgoing
/// presence, you MUST enable `allow_local_presence` as well.
#[serde(default = "true_fn")]
/// This option sends presence updates to other servers, and requires that
/// `allow_local_presence` is also enabled.
///
/// Note that outgoing presence is very heavy on the CPU and network, and
/// will typically cause extreme strain and slowdowns for no real benefit.
/// There are only a few clients that even implement presence, so you
/// probably don't want to enable this.
#[serde(default)]
pub allow_outgoing_presence: bool,
/// How many seconds without presence updates before you become idle.
@@ -1366,6 +1367,10 @@ pub struct Config {
pub allow_incoming_read_receipts: bool,
/// Allow sending read receipts to remote servers.
///
/// Note that sending read receipts to remote servers in large rooms with
/// lots of other homeservers may cause additional strain on the CPU and
/// network.
#[serde(default = "true_fn")]
pub allow_outgoing_read_receipts: bool,
@@ -1377,6 +1382,10 @@ pub struct Config {
pub allow_local_typing: bool,
/// Allow outgoing typing updates to federation.
///
/// Note that sending typing indicators to remote servers in large rooms
/// with lots of other homeservers may cause additional strain on the CPU
/// and network.
#[serde(default = "true_fn")]
pub allow_outgoing_typing: bool,

View File

@@ -1046,7 +1046,7 @@ async fn test_event_sort() {
// don't remove any events so we know it sorts them all correctly
let mut events_to_sort = events.keys().cloned().collect::<Vec<_>>();
events_to_sort.shuffle(&mut rand::thread_rng());
events_to_sort.shuffle(&mut rand::rng());
let power_level = resolved_power
.get(&(StateEventType::RoomPowerLevels, "".into()))

View File

@@ -28,7 +28,7 @@ fn init_argon() -> Argon2<'static> {
}
pub(super) fn password(password: &str) -> Result<String> {
let salt = SaltString::generate(rand::thread_rng());
let salt = SaltString::generate(rand_core::OsRng);
ARGON
.get_or_init(init_argon)
.hash_password(password.as_bytes(), &salt)
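A note on why this call site keeps the old rand_core (a sketch assuming password-hash 0.5's API, matching the `rand_core = "0.6.4"` dependency added in Cargo.toml above): `SaltString::generate` is bounded on rand_core 0.6's `CryptoRngCore`, which RNGs from the new rand no longer satisfy, so the 0.6 `OsRng` is passed directly.

```rust
// Minimal sketch, assuming argon2 0.5 / password-hash 0.5 and rand_core 0.6:
use argon2::password_hash::SaltString;

fn new_salt() -> SaltString {
    // rand::rng() yields an RNG implementing a newer rand_core than the 0.6
    // series password-hash expects; rand_core 0.6's OsRng satisfies its
    // CryptoRngCore bound directly.
    SaltString::generate(rand_core::OsRng)
}
```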

View File

@@ -4,16 +4,16 @@
};
use arrayvec::ArrayString;
use rand::{Rng, seq::SliceRandom, thread_rng};
use rand::{Rng, RngExt, seq::SliceRandom};
pub fn shuffle<T>(vec: &mut [T]) {
let mut rng = thread_rng();
let mut rng = rand::rng();
vec.shuffle(&mut rng);
}
pub fn string(length: usize) -> String {
thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
.take(length)
.map(char::from)
.collect()
@@ -22,8 +22,8 @@ pub fn string(length: usize) -> String {
#[inline]
pub fn string_array<const LENGTH: usize>() -> ArrayString<LENGTH> {
let mut ret = ArrayString::<LENGTH>::new();
thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
rand::rng()
.sample_iter(&rand::distr::Alphanumeric)
.take(LENGTH)
.map(char::from)
.for_each(|c| ret.push(c));
@@ -40,7 +40,4 @@ pub fn time_from_now_secs(range: Range<u64>) -> SystemTime {
}
#[must_use]
pub fn secs(range: Range<u64>) -> Duration {
let mut rng = thread_rng();
Duration::from_secs(rng.gen_range(range))
}
pub fn secs(range: Range<u64>) -> Duration { Duration::from_secs(rand::random_range(range)) }
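Taken together, the rand changes in this compare follow one consistent set of renames. A condensed sketch, restricted to names that appear in the diffs above (exact trait locations such as `RngExt` may differ between rand versions):

```rust
use rand::{Rng, RngExt, seq::SliceRandom};

fn demo() -> String {
    let mut rng = rand::rng(); // was: rand::thread_rng()

    let mut v = [1, 2, 3];
    v.shuffle(&mut rng); // SliceRandom usage is unchanged

    // was: rng.gen_range(2..5)
    let _secs: u64 = rand::random_range(2..5);

    // was: .sample_iter(&rand::distributions::Alphanumeric)
    rng.sample_iter(&rand::distr::Alphanumeric)
        .take(8)
        .map(char::from)
        .collect()
}
```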

View File

@@ -33,8 +33,6 @@ pub struct Engine {
pub(crate) db: Db,
pub(crate) pool: Arc<Pool>,
pub(crate) ctx: Arc<Context>,
pub(super) read_only: bool,
pub(super) secondary: bool,
pub(crate) checksums: bool,
corks: AtomicU32,
}
@@ -129,14 +127,6 @@ pub fn current_sequence(&self) -> u64 {
sequence
}
#[inline]
#[must_use]
pub fn is_read_only(&self) -> bool { self.secondary || self.read_only }
#[inline]
#[must_use]
pub fn is_secondary(&self) -> bool { self.secondary }
}
impl Drop for Engine {

View File

@@ -12,9 +12,8 @@ pub fn backup(&self) -> Result {
let mut engine = self.backup_engine()?;
let config = &self.ctx.server.config;
if config.database_backups_to_keep > 0 {
let flush = !self.is_read_only();
engine
.create_new_backup_flush(&self.db, flush)
.create_new_backup_flush(&self.db, true)
.map_err(map_err)?;
let engine_info = engine.get_backup_info();

View File

@@ -35,14 +35,7 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
}
debug!("Opening database...");
let db = if config.rocksdb_read_only {
Db::open_cf_descriptors_read_only(&db_opts, path, cfds, false)
} else if config.rocksdb_secondary {
Db::open_cf_descriptors_as_secondary(&db_opts, path, path, cfds)
} else {
Db::open_cf_descriptors(&db_opts, path, cfds)
}
.or_else(or_else)?;
let db = Db::open_cf_descriptors(&db_opts, path, cfds).or_else(or_else)?;
info!(
columns = num_cfds,
@@ -55,8 +48,6 @@ pub(crate) async fn open(ctx: Arc<Context>, desc: &[Descriptor]) -> Result<Arc<S
db,
pool: ctx.pool.clone(),
ctx: ctx.clone(),
read_only: config.rocksdb_read_only,
secondary: config.rocksdb_secondary,
checksums: config.rocksdb_checksums,
corks: AtomicU32::new(0),
}))

View File

@@ -74,14 +74,6 @@ pub fn iter(&self) -> impl Iterator<Item = (&MapsKey, &MapsVal)> + Send + '_ {
#[inline]
pub fn keys(&self) -> impl Iterator<Item = &MapsKey> + Send + '_ { self.maps.keys() }
#[inline]
#[must_use]
pub fn is_read_only(&self) -> bool { self.db.is_read_only() }
#[inline]
#[must_use]
pub fn is_secondary(&self) -> bool { self.db.is_secondary() }
}
impl Index<&str> for Database {

View File

@@ -27,10 +27,6 @@ pub struct Args {
#[arg(long, short('O'))]
pub option: Vec<String>,
/// Run in a stricter read-only --maintenance mode.
#[arg(long)]
pub read_only: bool,
/// Run in maintenance mode while refusing connections.
#[arg(long)]
pub maintenance: bool,
@@ -143,11 +139,7 @@ pub(crate) fn parse() -> Args { Args::parse() }
/// Synthesize any command line options with configuration file options.
pub(crate) fn update(mut config: Figment, args: &Args) -> Result<Figment> {
if args.read_only {
config = config.join(("rocksdb_read_only", true));
}
if args.maintenance || args.read_only {
if args.maintenance {
config = config.join(("startup_netburst", false));
config = config.join(("listening", false));
}

View File

@@ -100,8 +100,7 @@ async fn worker(self: Arc<Self>) -> Result<()> {
}
let first_check_jitter = {
let mut rng = rand::thread_rng();
let jitter_percent = rng.gen_range(-50.0..=10.0);
let jitter_percent = rand::random_range(-50.0..=10.0);
self.interval.mul_f64(1.0 + jitter_percent / 100.0)
};

View File

@@ -37,10 +37,6 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
}
async fn worker(self: Arc<Self>) -> Result {
if self.services.globals.is_read_only() {
return Ok(());
}
if self.services.config.ldap.enable {
warn!("emergency password feature not available with LDAP enabled.");
return Ok(());

View File

@@ -156,7 +156,4 @@ pub fn user_is_local(&self, user_id: &UserId) -> bool {
pub fn server_is_ours(&self, server_name: &ServerName) -> bool {
server_name == self.server_name()
}
#[inline]
pub fn is_read_only(&self) -> bool { self.db.db.is_read_only() }
}

View File

@@ -3,7 +3,7 @@
use std::sync::Arc;
use conduwuit::{
Err, Event, Result, Server, err,
Err, Event, Result, err,
utils::{ReadyExt, stream::TryIgnore},
};
use database::{Deserialized, Ignore, Interfix, Map};
@@ -30,12 +30,12 @@ struct Data {
}
struct Services {
server: Arc<Server>,
admin: Dep<admin::Service>,
appservice: Dep<appservice::Service>,
globals: Dep<globals::Service>,
sending: Dep<sending::Service>,
state_accessor: Dep<rooms::state_accessor::Service>,
state_cache: Dep<rooms::state_cache::Service>,
}
impl crate::Service for Service {
@@ -47,13 +47,13 @@ fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
aliasid_alias: args.db["aliasid_alias"].clone(),
},
services: Services {
server: args.server.clone(),
admin: args.depend::<admin::Service>("admin"),
appservice: args.depend::<appservice::Service>("appservice"),
globals: args.depend::<globals::Service>("globals"),
sending: args.depend::<sending::Service>("sending"),
state_accessor: args
.depend::<rooms::state_accessor::Service>("rooms::state_accessor"),
state_cache: args.depend::<rooms::state_cache::Service>("rooms::state_cache"),
},
}))
}
@@ -117,6 +117,9 @@ pub async fn remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) -> Resul
Ok(())
}
/// Resolves the given room ID or alias, returning the resolved room ID.
/// Unlike resolve_with_servers (the underlying call), potential resident
/// servers are not returned
#[inline]
pub async fn resolve(&self, room: &RoomOrAliasId) -> Result<OwnedRoomId> {
self.resolve_with_servers(room, None)
@@ -124,6 +127,14 @@ pub async fn resolve(&self, room: &RoomOrAliasId) -> Result<OwnedRoomId> {
.map(|(room_id, _)| room_id)
}
/// Resolves the given room ID or alias, returning the resolved room ID, and
/// any servers that might be able to assist in fetching room data.
///
/// If the input is a room ID, this simply returns it and <servers>.
/// If the input is an alias, this attempts to resolve it locally, then via
/// appservices, and finally remotely if the alias is not local.
/// If the alias is successfully resolved, the room ID and an empty list of
/// servers is returned.
pub async fn resolve_with_servers(
&self,
room: &RoomOrAliasId,
@@ -134,28 +145,26 @@ pub async fn resolve_with_servers(
Ok((room_id.to_owned(), servers.unwrap_or_default()))
} else {
let alias: &RoomAliasId = room.try_into().expect("valid RoomAliasId");
self.resolve_alias(alias, servers).await
self.resolve_alias(alias).await
}
}
/// Resolves the given room alias, returning the resolved room ID and any
/// servers that might be in the room.
#[tracing::instrument(skip(self), name = "resolve")]
pub async fn resolve_alias(
&self,
room_alias: &RoomAliasId,
servers: Option<Vec<OwnedServerName>>,
) -> Result<(OwnedRoomId, Vec<OwnedServerName>)> {
let server_name = room_alias.server_name();
let server_is_ours = self.services.globals.server_is_ours(server_name);
let servers_contains_ours = || {
servers
.as_ref()
.is_some_and(|servers| servers.contains(&self.services.server.name))
};
let server_is_ours = self
.services
.globals
.server_is_ours(room_alias.server_name());
if !server_is_ours && !servers_contains_ours() {
return self
.remote_resolve(room_alias, servers.unwrap_or_default())
.await;
if !server_is_ours {
// TODO: The spec advises servers may cache remote room aliases temporarily.
// We might want to look at doing that.
return self.remote_resolve(room_alias).await;
}
let room_id = match self.resolve_local_alias(room_alias).await {
@@ -163,10 +172,18 @@ pub async fn resolve_alias(
| Err(_) => self.resolve_appservice_alias(room_alias).await?,
};
room_id.map_or_else(
|| Err!(Request(NotFound("Room with alias not found."))),
|room_id| Ok((room_id, Vec::new())),
)
if let Some(room_id) = room_id {
let servers: Vec<OwnedServerName> = self
.services
.state_cache
.room_servers(&room_id)
.map(ToOwned::to_owned)
.collect()
.await;
return Ok((room_id, servers));
}
Err!(Request(NotFound("Alias does not exist.")))
}
#[tracing::instrument(skip(self), level = "debug")]
@@ -206,12 +223,12 @@ async fn user_can_remove_alias(&self, alias: &RoomAliasId, user_id: &UserId) ->
// The creator of an alias can remove it
if self
.who_created_alias(alias).await
.is_ok_and(|user| user == user_id)
// Server admins can remove any local alias
|| self.services.admin.user_is_admin(user_id).await
// Always allow the server service account to remove the alias, since there may not be an admin room
|| server_user == user_id
.who_created_alias(alias).await
.is_ok_and(|user| user == user_id)
// Server admins can remove any local alias
|| self.services.admin.user_is_admin(user_id).await
// Always allow the server service account to remove the alias, since there may not be an admin room
|| server_user == user_id
{
return Ok(true);
}

View File

@@ -1,6 +1,4 @@
use std::iter::once;
use conduwuit::{Result, debug, debug_error, err, implement};
use conduwuit::{Result, debug, error, implement};
use federation::query::get_room_information::v1::Response;
use ruma::{OwnedRoomId, OwnedServerName, RoomAliasId, ServerName, api::federation};
@@ -8,40 +6,21 @@
pub(super) async fn remote_resolve(
&self,
room_alias: &RoomAliasId,
servers: Vec<OwnedServerName>,
) -> Result<(OwnedRoomId, Vec<OwnedServerName>)> {
debug!(?room_alias, servers = ?servers, "resolve");
let servers = once(room_alias.server_name())
.map(ToOwned::to_owned)
.chain(servers.into_iter());
let mut resolved_servers = Vec::new();
let mut resolved_room_id: Option<OwnedRoomId> = None;
for server in servers {
match self.remote_request(room_alias, &server).await {
| Err(e) => debug_error!("Failed to query for {room_alias:?} from {server}: {e}"),
| Ok(Response { room_id, servers }) => {
debug!(
"Server {server} answered with {room_id:?} for {room_alias:?} servers: \
{servers:?}"
);
resolved_room_id.get_or_insert(room_id);
add_server(&mut resolved_servers, server);
if !servers.is_empty() {
add_servers(&mut resolved_servers, servers);
break;
}
},
}
debug!("Asking {} to resolve {room_alias:?}", room_alias.server_name());
match self
.remote_request(room_alias, room_alias.server_name())
.await
{
| Err(e) => {
error!("Unable to resolve remote room alias {}: {e}", room_alias);
Err(e)
},
| Ok(Response { room_id, servers }) => {
debug!("Remote resolved {room_alias:?} to {room_id:?} with servers {servers:?}");
Ok((room_id, servers))
},
}
resolved_room_id
.map(|room_id| (room_id, resolved_servers))
.ok_or_else(|| {
err!(Request(NotFound("No servers could assist in resolving the room alias")))
})
}
#[implement(super::Service)]
@@ -59,15 +38,3 @@ async fn remote_request(
.send_federation_request(server, request)
.await
}
fn add_servers(servers: &mut Vec<OwnedServerName>, new: Vec<OwnedServerName>) {
for server in new {
add_server(servers, server);
}
}
fn add_server(servers: &mut Vec<OwnedServerName>, server: OwnedServerName) {
if !servers.contains(&server) {
servers.push(server);
}
}

View File

@@ -139,7 +139,12 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
})
.boxed();
let mut federated_room = false;
while let Some(ref backfill_server) = servers.next().await {
if !self.services.globals.server_is_ours(backfill_server) {
federated_room = true;
}
info!("Asking {backfill_server} for backfill in {room_id}");
let response = self
.services
@@ -168,7 +173,9 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
}
}
warn!("No servers could backfill, but backfill was needed in room {room_id}");
if federated_room {
warn!("No servers could backfill, but backfill was needed in room {room_id}");
}
Ok(())
}

View File

@@ -385,11 +385,13 @@ fn num_senders(args: &crate::Args<'_>) -> usize {
const MIN_SENDERS: usize = 1;
// Limit the number of senders to the number of workers threads or number of
// cores, conservatively.
let max_senders = args
.server
.metrics
.num_workers()
.min(available_parallelism());
let mut max_senders = args.server.metrics.num_workers();
// Work around some platforms not returning the number of cores.
let num_cores = available_parallelism();
if num_cores > 0 {
max_senders = max_senders.min(num_cores);
}
// If the user doesn't override the default 0, this is intended to then default
// to 1 for now as multiple senders is experimental.

View File

@@ -139,7 +139,7 @@ pub async fn start(self: &Arc<Self>) -> Result<Arc<Self>> {
// reset dormant online/away statuses to offline, and set the server user as
// online
if self.server.config.allow_local_presence && !self.db.is_read_only() {
if self.server.config.allow_local_presence {
self.presence.unset_all_presence().await;
_ = self
.presence
@@ -156,7 +156,7 @@ pub async fn stop(&self) {
info!("Shutting down services...");
// set the server user as offline
if self.server.config.allow_local_presence && !self.db.is_read_only() {
if self.server.config.allow_local_presence {
_ = self
.presence
.ping_presence(&self.globals.server_user, &ruma::presence::PresenceState::Offline)