From 6e16a6ef8f90ee4a1b2a92435475ddf7359ba837 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 14 Jun 2025 22:34:24 +0100 Subject: [PATCH 001/270] chore: Release announcement --- docs/static/announcements.json | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/docs/static/announcements.json b/docs/static/announcements.json index 9b97d091..7dd2fb72 100644 --- a/docs/static/announcements.json +++ b/docs/static/announcements.json @@ -4,6 +4,10 @@ { "id": 1, "message": "Welcome to Continuwuity! Important announcements about the project will appear here." + }, + { + "id": 2, + "message": "🎉 Continuwuity v0.5.0-rc.6 is now available! This release includes improved knock-restricted room handling, automatic support contact configuration, and a new HTML landing page. Check [the release notes for full details](https://forgejo.ellis.link/continuwuation/continuwuity/releases/tag/v0.5.0-rc.6) and upgrade instructions." } ] -} \ No newline at end of file +} From d6fd30393c224c216867f46341616c041bbcd97a Mon Sep 17 00:00:00 2001 From: Kimiblock Date: Thu, 19 Jun 2025 12:36:49 +0000 Subject: [PATCH 002/270] Update docs/deploying/arch-linux.md --- docs/deploying/arch-linux.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md index a14201e3..52d2afb2 100644 --- a/docs/deploying/arch-linux.md +++ b/docs/deploying/arch-linux.md @@ -1,3 +1,5 @@ # Continuwuity for Arch Linux -Continuwuity does not have any Arch Linux packages at this time. +Continuwuity is available on the `archlinuxcn` repository and AUR, with the same package name `continuwuity`, which includes the latest tagged version. The development version is available on AUR as `continuwuity-git`. + +Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable/start the continuwuity.service. 
\ No newline at end of file From e508b1197f3a6e485f35edf0d3c529cb58c391b5 Mon Sep 17 00:00:00 2001 From: nex Date: Thu, 19 Jun 2025 21:27:50 +0000 Subject: [PATCH 003/270] feat: allow overriding the "most recent event" when forcing a state download (#853) Add option to select which event to set the state at to, for the force-set-room-state admin command. This allows us to work around issues where the latest PDU is one that remote servers don't know about (i.e. failed federation for whatever reason) Closes #852 Reviewed-on: https://forgejo.ellis.link/continuwuation/continuwuity/pulls/853 Reviewed-by: Jade Ellis Co-authored-by: nex Co-committed-by: nex --- src/admin/debug/commands.rs | 37 +++++++++++++++++++++++-------------- src/admin/debug/mod.rs | 3 +++ 2 files changed, 26 insertions(+), 14 deletions(-) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index d0debc2a..a397e0fc 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -239,10 +239,11 @@ pub(super) async fn get_remote_pdu( }) .await { - | Err(e) => + | Err(e) => { return Err!( "Remote server did not have PDU or failed sending request to remote server: {e}" - ), + ); + }, | Ok(response) => { let json: CanonicalJsonObject = serde_json::from_str(response.pdu.get()).map_err(|e| { @@ -384,8 +385,9 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) .reload .reload(&old_filter_layer, Some(handles)) { - | Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), + | Err(e) => { + return Err!("Failed to modify and reload the global tracing log level: {e}"); + }, | Ok(()) => { let value = &self.services.server.config.log; let out = format!("Successfully changed log level back to config value {value}"); @@ -408,8 +410,9 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) .reload(&new_filter_layer, Some(handles)) { | Ok(()) => return self.write_str("Successfully changed log level").await, - 
| Err(e) => - return Err!("Failed to modify and reload the global tracing log level: {e}"), + | Err(e) => { + return Err!("Failed to modify and reload the global tracing log level: {e}"); + }, } } @@ -529,6 +532,7 @@ pub(super) async fn force_set_room_state_from_server( &self, room_id: OwnedRoomId, server_name: OwnedServerName, + at_event: Option, ) -> Result { if !self .services @@ -540,13 +544,18 @@ pub(super) async fn force_set_room_state_from_server( return Err!("We are not participating in the room / we don't know about the room ID."); } - let first_pdu = self - .services - .rooms - .timeline - .latest_pdu_in_room(&room_id) - .await - .map_err(|_| err!(Database("Failed to find the latest PDU in database")))?; + let at_event_id = match at_event { + | Some(event_id) => event_id, + | None => self + .services + .rooms + .timeline + .latest_pdu_in_room(&room_id) + .await + .map_err(|_| err!(Database("Failed to find the latest PDU in database")))? + .event_id + .clone(), + }; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; @@ -557,7 +566,7 @@ pub(super) async fn force_set_room_state_from_server( .sending .send_federation_request(&server_name, get_room_state::v1::Request { room_id: room_id.clone(), - event_id: first_pdu.event_id.clone(), + event_id: at_event_id, }) .await?; diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index 1fd4e263..bceee9ba 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -177,6 +177,9 @@ pub(super) enum DebugCommand { room_id: OwnedRoomId, /// The server we will use to query the room state for server_name: OwnedServerName, + /// The event ID of the latest known PDU in the room. Will be found + /// automatically if not provided. 
+ event_id: Option, }, /// - Runs a server name through conduwuit's true destination resolution From a737d845a436016b9eaf55ca19cbdd4cc419337a Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 21 May 2025 20:41:34 +0100 Subject: [PATCH 004/270] chore: Don't specify targets in rust-toolchain --- rust-toolchain.toml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index aadc8f99..65890260 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -19,11 +19,3 @@ components = [ "rustfmt", "clippy", ] -targets = [ - #"x86_64-apple-darwin", - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-unknown-linux-musl", - "aarch64-unknown-linux-gnu", - #"aarch64-apple-darwin", -] From b526935d45429be72b7e99aef7e57e236b2e84bf Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 20 Jun 2025 21:35:03 +0100 Subject: [PATCH 005/270] build: Specify debian version --- docker/Dockerfile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docker/Dockerfile b/docker/Dockerfile index e734fb81..dfda57fb 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,8 +1,9 @@ ARG RUST_VERSION=1 +ARG DEBIAN_VERSION=bookworm FROM --platform=$BUILDPLATFORM docker.io/tonistiigi/xx AS xx -FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS base -FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-bookworm AS toolchain +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS base +FROM --platform=$BUILDPLATFORM rust:${RUST_VERSION}-slim-${DEBIAN_VERSION} AS toolchain # Prevent deletion of apt cache RUN rm -f /etc/apt/apt.conf.d/docker-clean From 08fbcbba691234b6240c03805354424deb2931e9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 20 Jun 2025 21:35:48 +0100 Subject: [PATCH 006/270] build: Use newer LLVM for rust 1.87 --- docker/Dockerfile | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/docker/Dockerfile 
b/docker/Dockerfile index dfda57fb..e40438e9 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -10,7 +10,7 @@ RUN rm -f /etc/apt/apt.conf.d/docker-clean # Match Rustc version as close as possible # rustc -vV -ARG LLVM_VERSION=19 +ARG LLVM_VERSION=20 # ENV RUSTUP_TOOLCHAIN=${RUST_VERSION} # Install repo tools @@ -20,10 +20,18 @@ ARG LLVM_VERSION=19 RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ --mount=type=cache,target=/var/lib/apt,sharing=locked \ apt-get update && apt-get install -y \ - clang-${LLVM_VERSION} lld-${LLVM_VERSION} pkg-config make jq \ - curl git \ + pkg-config make jq \ + curl git software-properties-common \ file +# LLVM packages +RUN --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked \ + curl https://apt.llvm.org/llvm.sh > llvm.sh && \ + chmod +x llvm.sh && \ + ./llvm.sh ${LLVM_VERSION} && \ + rm llvm.sh + # Create symlinks for LLVM tools RUN < Date: Fri, 20 Jun 2025 21:45:29 +0100 Subject: [PATCH 007/270] build: Upgrade to Rust 1.87 --- rust-toolchain.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 65890260..bdb608aa 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -9,7 +9,7 @@ # If you're having trouble making the relevant changes, bug a maintainer. 
[toolchain] -channel = "1.86.0" +channel = "1.87.0" profile = "minimal" components = [ # For rust-analyzer From 01200d9b545bdad3f59b0db447a1480aa6314d50 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 20 Jun 2025 21:48:37 +0100 Subject: [PATCH 008/270] build: Allow specifying build profile Additionally splits caches by target CPU --- .forgejo/workflows/release-image.yml | 12 ++++++++---- docker/Dockerfile | 5 +++-- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/.forgejo/workflows/release-image.yml b/.forgejo/workflows/release-image.yml index 55b303b2..170fe668 100644 --- a/.forgejo/workflows/release-image.yml +++ b/.forgejo/workflows/release-image.yml @@ -49,6 +49,7 @@ jobs: const platforms = ['linux/amd64', 'linux/arm64'] core.setOutput('build_matrix', JSON.stringify({ platform: platforms, + target_cpu: ['base'], include: platforms.map(platform => { return { platform, slug: platform.replace('/', '-') @@ -66,6 +67,8 @@ jobs: strategy: matrix: { + "target_cpu": ["base"], + "profile": ["release"], "include": [ { "platform": "linux/amd64", "slug": "linux-amd64" }, @@ -73,6 +76,7 @@ jobs: ], "platform": ["linux/amd64", "linux/arm64"], } + steps: - name: Echo strategy run: echo '${{ toJSON(fromJSON(needs.define-variables.outputs.build_matrix)) }}' @@ -140,8 +144,8 @@ jobs: uses: actions/cache@v3 with: path: | - cargo-target-${{ matrix.slug }} - key: cargo-target-${{ matrix.slug }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}} + cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }} + key: cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}-${{hashFiles('**/Cargo.lock') }}-${{steps.rust-toolchain.outputs.rustc_version}} - name: Cache apt cache id: cache-apt uses: actions/cache@v3 @@ -163,9 +167,9 @@ jobs: { ".cargo/registry": "/usr/local/cargo/registry", ".cargo/git/db": "/usr/local/cargo/git/db", - "cargo-target-${{ matrix.slug }}": { + "cargo-target-${{ 
matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}": { "target": "/app/target", - "id": "cargo-target-${{ matrix.platform }}" + "id": "cargo-target-${{ matrix.target_cpu }}-${{ matrix.slug }}-${{ matrix.profile }}" }, "var-cache-apt-${{ matrix.slug }}": "/var/cache/apt", "var-lib-apt-${{ matrix.slug }}": "/var/lib/apt" diff --git a/docker/Dockerfile b/docker/Dockerfile index e40438e9..bd6e72d1 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -149,11 +149,12 @@ ENV GIT_REMOTE_COMMIT_URL=$GIT_REMOTE_COMMIT_URL ENV CONDUWUIT_VERSION_EXTRA=$CONDUWUIT_VERSION_EXTRA ENV CONTINUWUITY_VERSION_EXTRA=$CONTINUWUITY_VERSION_EXTRA +ARG RUST_PROFILE=release # Build the binary RUN --mount=type=cache,target=/usr/local/cargo/registry \ --mount=type=cache,target=/usr/local/cargo/git/db \ - --mount=type=cache,target=/app/target,id=cargo-target-${TARGETPLATFORM} \ + --mount=type=cache,target=/app/target,id=cargo-target-${TARGET_CPU}-${TARGETPLATFORM}-${RUST_PROFILE} \ bash <<'EOF' set -o allexport set -o xtrace @@ -162,7 +163,7 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \ jq -r ".target_directory")) mkdir /out/sbin PACKAGE=conduwuit - xx-cargo build --locked --release \ + xx-cargo build --locked --profile ${RUST_PROFILE} \ -p $PACKAGE; BINARIES=($(cargo metadata --no-deps --format-version 1 | \ jq -r ".packages[] | select(.name == \"$PACKAGE\") | .targets[] | select( .kind | map(. 
== \"bin\") | any ) | .name")) From add5c7052c8a2aa61a78a785500530ecdd394626 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 20 Jun 2025 21:51:53 +0100 Subject: [PATCH 009/270] chore: Update lockfile --- Cargo.lock | 647 ++++++++++++++++++++++++++++++++++------------------- 1 file changed, 415 insertions(+), 232 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ec6e848d..59662da2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aho-corasick" @@ -28,9 +28,12 @@ dependencies = [ [[package]] name = "aligned-vec" -version = "0.5.0" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aa90d7ce82d4be67b64039a3d588d38dbcc6736577de4a847025ce5b0c468d1" +checksum = "dc890384c8602f339876ded803c97ad529f3842aba97f6392b3dba0dd171769b" +dependencies = [ + "equator", +] [[package]] name = "alloc-no-stdlib" @@ -49,9 +52,9 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" [[package]] name = "anyhow" @@ -170,9 +173,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.23" +version = "0.4.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b37fc50485c4f3f736a4fb14199f6d5f5ba008d7f28fe710306c92780f004c07" +checksum = "40f6024f3f856663b45fd0c9b6f2024034a702f453549449e0d84a305900dad4" dependencies = [ "brotli", "flate2", @@ -219,9 +222,9 @@ dependencies = [ [[package]] name = "atomic" -version 
= "0.6.0" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d818003e740b63afc82337e3160717f4f63078720a810b7b903e70a5d1d2994" +checksum = "a89cbf775b137e9b968e67227ef7f775587cde3fd31b0d8599dbd0f598a48340" dependencies = [ "bytemuck", ] @@ -234,15 +237,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "av1-grain" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6678909d8c5d46a42abcf571271e15fdbc0a225e3646cf23762cd415046c78bf" +checksum = "4f3efb2ca85bc610acfa917b5aaa36f3fcbebed5b3182d7f877b02531c4b80c8" dependencies = [ "anyhow", "arrayvec", @@ -263,9 +266,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b756939cb2f8dc900aa6dcd505e6e2428e9cae7ff7b028c49e3946efa70878" +checksum = "93fcc8f365936c834db5514fc45aee5b1202d677e6b40e48468aaaa8183ca8c7" dependencies = [ "aws-lc-sys", "zeroize", @@ -273,9 +276,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.28.2" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa9b6986f250236c27e5a204062434a773a13243d2ffc2955f37bdba4c5c6a1" +checksum = "61b1d86e7705efe1be1b569bab41d4fa1e14e220b60a160f78de2db687add079" dependencies = [ "bindgen 0.69.5", "cc", @@ -442,9 +445,9 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "basic-toml" @@ -461,7 +464,7 @@ version = "0.69.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.12.1", @@ -484,7 +487,7 @@ version = "0.71.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f58bf3d7db68cfbac37cfc485a8d711e87e064c3d0fe0435b92f7a407f9d6b3" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cexpr", "clang-sys", "itertools 0.13.0", @@ -510,9 +513,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "1b8e56985ec62d17e9c1001dc89c88ecd7dc08e47eba5ec7c29c7b5eeecde967" [[package]] name = "bitstream-io" @@ -582,15 +585,15 @@ checksum = "f4ad8f11f288f48ca24471bbd51ac257aaeaaa07adae295591266b792902ae64" [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "793db76d6187cd04dff33004d8e6c9cc4e05cd330500379d2394209271b4aeee" [[package]] name = "bytemuck" -version = "1.23.0" +version = "1.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9134a6ef01ce4b366b50689c94f82c14bc72bc5d0386829828a2e2752ef7958c" +checksum = "5c76a5792e44e4abe34d3abf15636779261d45a7450612059293d1d2cfc63422" [[package]] name = "byteorder" @@ -638,9 +641,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.22" +version = "1.2.27" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "32db95edf998450acc7881c932f94cd9b05c87b4b2599e8bab064753da4acfd1" +checksum = "d487aa071b5f64da6f19a3e848e3578944b726ee5a4854b82172f02aa876bfdc" dependencies = [ "jobserver", "libc", @@ -668,9 +671,9 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "9555578bc9e57714c812a1f84e4fc5b4d21fcb063490c624de019f7464c91268" [[package]] name = "cfg_aliases" @@ -709,9 +712,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed93b9805f8ba930df42c2590f05453d5ec36cbb85d018868a5b24d31f6ac000" +checksum = "40b6887a1d8685cebccf115538db5c0efe625ccac9696ad45c409d96566e910f" dependencies = [ "clap_builder", "clap_derive", @@ -719,9 +722,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.38" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379026ff283facf611b0ea629334361c4211d1b12ee01024eec1591133b04120" +checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ "anstyle", "clap_lex", @@ -729,9 +732,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.32" +version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09176aae279615badda0765c0c0b3f6ed53f4709118af73cf4655d85d1530cd7" +checksum = "d2c7947ae4cc3d851207c1adb5b5e260ff0cca11446b1d6d1423788e442257ce" dependencies = [ "heck", "proc-macro2", @@ -741,9 +744,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.4" +version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +checksum = 
"b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" [[package]] name = "cmake" @@ -1088,19 +1091,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2459fc9262a1aa204eb4b5764ad4f189caec88aea9634389c0a25f8be7f6265e" [[package]] -name = "coolor" -version = "1.0.0" +name = "convert_case" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "691defa50318376447a73ced869862baecfab35f6aabaa91a4cd726b315bfe1a" +checksum = "bb402b8d4c85569410425650ce3eddc7d698ed96d39a73f941b08fb63082f1e7" dependencies = [ - "crossterm", + "unicode-segmentation", +] + +[[package]] +name = "coolor" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "980c2afde4af43d6a05c5be738f9eae595cff86dce1f38f88b95058a98c027f3" +dependencies = [ + "crossterm 0.29.0", ] [[package]] name = "core-foundation" -version = "0.10.0" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6" dependencies = [ "core-foundation-sys", "libc", @@ -1148,12 +1160,12 @@ checksum = "790eea4361631c5e7d22598ecd5723ff611904e3344ce8720784c93e3d83d40b" [[package]] name = "crokey" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5ff945e42bb93d29b10ba509970066a269903a932f0ea07d99d8621f97e90d7" +checksum = "5282b45c96c5978c8723ea83385cb9a488b64b7d175733f48d07bf9da514a863" dependencies = [ "crokey-proc_macros", - "crossterm", + "crossterm 0.29.0", "once_cell", "serde", "strict", @@ -1161,11 +1173,11 @@ dependencies = [ [[package]] name = "crokey-proc_macros" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "665f2180fd82d0ba2bf3deb45fafabb18f23451024ff71ee47f6bfdfb4bbe09e" +checksum = 
"2ea0218d3fedf0797fa55676f1964ef5d27103d41ed0281b4bbd2a6e6c3d8d28" dependencies = [ - "crossterm", + "crossterm 0.29.0", "proc-macro2", "quote", "strict", @@ -1234,12 +1246,30 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "crossterm_winapi", "futures-core", "mio", "parking_lot", - "rustix", + "rustix 0.38.44", + "signal-hook", + "signal-hook-mio", + "winapi", +] + +[[package]] +name = "crossterm" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" +dependencies = [ + "bitflags 2.9.1", + "crossterm_winapi", + "derive_more", + "document-features", + "mio", + "parking_lot", + "rustix 1.0.7", "signal-hook", "signal-hook-mio", "winapi", @@ -1337,9 +1367,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", "zeroize", @@ -1354,6 +1384,27 @@ dependencies = [ "powerfmt", ] +[[package]] +name = "derive_more" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +dependencies = [ + "derive_more-impl", +] + +[[package]] +name = "derive_more-impl" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +dependencies = [ + "convert_case", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "digest" version = "0.10.7" @@ -1376,6 +1427,15 @@ dependencies = [ "syn", ] +[[package]] +name = 
"document-features" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95249b50c6c185bee49034bcb378a49dc2b5dff0be90ff6616d31d64febab05d" +dependencies = [ + "litrs", +] + [[package]] name = "dunce" version = "1.0.5" @@ -1428,6 +1488,26 @@ dependencies = [ "syn", ] +[[package]] +name = "equator" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4711b213838dfee0117e3be6ac926007d7f433d7bbe33595975d4190cb07e6fc" +dependencies = [ + "equator-macro", +] + +[[package]] +name = "equator-macro" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "equivalent" version = "1.0.2" @@ -1436,12 +1516,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "778e2ac28f6c47af28e4907f13ffd1e1ddbd400980a9abd7c8df189bf578a5ad" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.60.2", ] [[package]] @@ -1522,9 +1602,9 @@ dependencies = [ [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -1557,9 +1637,9 @@ dependencies = [ [[package]] name = "fs-err" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f89bda4c2a21204059a977ed3bfe746677dfd137b83c339e702b0ac91d482aa" +checksum = 
"88d7be93788013f265201256d58f04936a8079ad5dc898743aa20525f503b683" dependencies = [ "autocfg", "tokio", @@ -1671,10 +1751,11 @@ dependencies = [ [[package]] name = "generator" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6bd114ceda131d3b1d665eba35788690ad37f5916457286b32ab6fd3c438dd" +checksum = "d18470a76cb7f8ff746cf1f7470914f900252ec36bbc40b569d74b1258446827" dependencies = [ + "cc", "cfg-if", "libc", "log", @@ -1701,7 +1782,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] @@ -1784,9 +1865,9 @@ checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" [[package]] name = "hashbrown" -version = "0.15.3" +version = "0.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b26c544d002229e640969970a2e74021aadf6e2f96372b9c58eff97de08eb3" +checksum = "5971ac85611da7067dbfcabef3c70ebb5606018acd9e2a3903a0da507521e0d5" [[package]] name = "hdrhistogram" @@ -1803,11 +1884,11 @@ dependencies = [ [[package]] name = "headers" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "bytes", "headers-core", "http", @@ -1833,9 +1914,9 @@ checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hermit-abi" -version = "0.3.9" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -2063,11 +2144,10 @@ dependencies = [ [[package]] name = "hyper-rustls" 
-version = "0.27.5" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d191583f3da1305256f22463b9bb0471acad48a4e534a5218b9963e9c1f59b2" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", @@ -2077,7 +2157,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots", + "webpki-roots 1.0.1", ] [[package]] @@ -2161,9 +2241,9 @@ checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2549ca8c7241c82f59c80ba2a6f415d931c5b58d24fb8412caa1a1f02c49139a" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", @@ -2177,9 +2257,9 @@ dependencies = [ [[package]] name = "icu_properties_data" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8197e866e47b68f8f7d95249e172903bec06004b18b2937f1095d40a0c57de04" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" @@ -2244,9 +2324,9 @@ dependencies = [ [[package]] name = "image-webp" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b77d01e822461baa8409e156015a1d91735549f0f2c17691bd2d996bef238f7f" +checksum = "f6970fe7a5300b4b42e62c52efa0187540a5bef546c60edaf554ef595d2e6f0b" dependencies = [ "byteorder-lite", "quick-error", @@ -2275,7 +2355,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" dependencies = [ "equivalent", - "hashbrown 0.15.3", + "hashbrown 0.15.4", "serde", ] @@ -2474,9 +2554,9 @@ checksum = 
"03087c2bad5e1034e8cace5926dec053fb3790248370865f5117a7d0213354c8" [[package]] name = "libc" -version = "0.2.172" +version = "0.2.174" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "1171693293099992e19cddea4e8b849964e9846f4acee11b3948bcc337be8776" [[package]] name = "libfuzzer-sys" @@ -2490,12 +2570,12 @@ dependencies = [ [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.0", + "windows-targets 0.53.2", ] [[package]] @@ -2521,6 +2601,12 @@ version = "0.4.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +[[package]] +name = "linux-raw-sys" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" + [[package]] name = "litemap" version = "0.8.0" @@ -2528,10 +2614,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] -name = "lock_api" -version = "0.4.12" +name = "litrs" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "b4ce301924b7887e9d637144fdade93f9dfff9b60981d4ac161db09720d39aa5" + +[[package]] +name = "lock_api" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -2584,6 +2676,12 @@ 
dependencies = [ "linked-hash-map", ] +[[package]] +name = "lru-slab" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + [[package]] name = "lz4-sys" version = "1.11.1+lz4-1.10.0" @@ -2659,9 +2757,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "32a282da65faaf38286cf3be983213fcf1d2e2a58700e808f83f4ea9a4804bc0" [[package]] name = "mime" @@ -2716,9 +2814,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", "simd-adler32", @@ -2726,14 +2824,14 @@ dependencies = [ [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] @@ -2767,7 +2865,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "cfg-if", "cfg_aliases", "libc", @@ -2897,9 +2995,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.16.0" +version = "1.17.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" +checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ "hermit-abi", "libc", @@ -3013,11 +3111,12 @@ dependencies = [ [[package]] name = "os_info" -version = "3.10.0" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a604e53c24761286860eba4e2c8b23a0161526476b1de520139d69cdb85a6b5" +checksum = "d0e1ac5fde8d43c34139135df8ea9ee9465394b2d8d20f032d38998f64afffc3" dependencies = [ "log", + "plist", "serde", "windows-sys 0.52.0", ] @@ -3036,9 +3135,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -3046,9 +3145,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3189,6 +3288,19 @@ version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" +[[package]] +name = "plist" +version = "1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d77244ce2d584cd84f6a15f86195b8c9b2a0dfbfd817c09e0464244091a58ed" +dependencies = [ + "base64 0.22.1", + "indexmap 2.9.0", + "quick-xml", + "serde", + "time", +] + [[package]] name = "png" version = "0.17.16" @@ 
-3204,9 +3316,9 @@ dependencies = [ [[package]] name = "portable-atomic" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "350e9b48cbc6b0e028b0473b114454c6316e57336ee184ceab6e53f72c178b3e" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" [[package]] name = "potential_utf" @@ -3240,9 +3352,9 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "prettyplease" -version = "0.2.31" +version = "0.2.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5316f57387668042f561aae71480de936257848f9c43ce528e311d89a07cadeb" +checksum = "061c1221631e079b26479d25bbf2275bfe5917ae8419cd7e34f13bfc2aa7539a" dependencies = [ "proc-macro2", "syn", @@ -3336,7 +3448,7 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "memchr", "pulldown-cmark-escape", "unicase", @@ -3364,10 +3476,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" [[package]] -name = "quinn" -version = "0.11.7" +name = "quick-xml" +version = "0.37.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3bd15a6f2967aef83887dcb9fec0014580467e33720d073560cf015a5683012" +checksum = "331e97a1af0bf59823e6eadffe373d7b27f485be8748f71471c662c1f269b7fb" +dependencies = [ + "memchr", +] + +[[package]] +name = "quinn" +version = "0.11.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "626214629cda6781b6dc1d316ba307189c85ba657213ce642d9c77670f8202c8" dependencies = [ "bytes", "cfg_aliases", @@ -3385,12 +3506,13 @@ dependencies = [ [[package]] name = "quinn-proto" -version = "0.11.10" +version = "0.11.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b820744eb4dc9b57a3398183639c511b5a26d2ed702cedd3febaa1393caa22cc" +checksum = "49df843a9161c85bb8aae55f101bc0bac8bcafd637a620d9122fd7e0b2f7422e" dependencies = [ "bytes", "getrandom 0.3.3", + "lru-slab", "rand 0.9.1", "ring", "rustc-hash 2.1.1", @@ -3405,9 +3527,9 @@ dependencies = [ [[package]] name = "quinn-udp" -version = "0.5.11" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "541d0f57c6ec747a90738a52741d3221f7960e8ac2f0ff4b1a63680e033b4ab5" +checksum = "fcebb1209ee276352ef14ff8732e24cc2b02bbac986cd74a4c81bcb2f9881970" dependencies = [ "cfg_aliases", "libc", @@ -3428,9 +3550,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" @@ -3563,11 +3685,11 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.12" +version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "928fca9cf2aa042393a8325b9ead81d2f0df4cb12e1e24cef072922ccd99c5af" +checksum = "0d04b7d0ee6b4a0207a0a7adb104d23ecb0b47d6beae7152d0fa34b692b29fd6" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -3660,7 +3782,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 0.26.11", "windows-registry", ] @@ -3916,9 +4038,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "989e6739f80c4ad5b13e0fd7fe89531180375b18520cc8c82080e4dc4035b84f" [[package]] name = "rustc-hash" @@ -3947,18 +4069,31 
@@ version = "0.38.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "errno", "libc", - "linux-raw-sys", + "linux-raw-sys 0.4.15", + "windows-sys 0.59.0", +] + +[[package]] +name = "rustix" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266" +dependencies = [ + "bitflags 2.9.1", + "errno", + "libc", + "linux-raw-sys 0.9.4", "windows-sys 0.59.0", ] [[package]] name = "rustls" -version = "0.23.27" +version = "0.23.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "730944ca083c1c233a75c09f199e973ca499344a2b7ba9e755c457e86fb4a321" +checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "aws-lc-rs", "log", @@ -4015,23 +4150,23 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "rustyline-async" version = "0.4.3" source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" dependencies = [ - "crossterm", + "crossterm 0.28.1", "futures-channel", "futures-util", "pin-project", "thingbuf", "thiserror 2.0.12", "unicode-segmentation", - "unicode-width 0.2.0", + "unicode-width 0.2.1", ] [[package]] @@ -4085,7 +4220,7 @@ version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", "core-foundation", 
"core-foundation-sys", "libc", @@ -4127,7 +4262,7 @@ dependencies = [ "sentry-tracing", "tokio", "ureq", - "webpki-roots", + "webpki-roots 0.26.11", ] [[package]] @@ -4310,9 +4445,9 @@ dependencies = [ [[package]] name = "serde_spanned" -version = "0.6.8" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" dependencies = [ "serde", ] @@ -4381,9 +4516,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8621587d4798caf8eb44879d42e56b9a93ea5dcd315a6487c357130095b62801" +checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" dependencies = [ "libc", "signal-hook-registry", @@ -4441,12 +4576,9 @@ checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" [[package]] name = "slab" -version = "0.4.9" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "04dc19736151f35336d325007ac991178d504a119863a2fcb3758cdb5e52c50d" [[package]] name = "smallstr" @@ -4460,18 +4592,18 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" dependencies = [ "serde", ] [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" 
+checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", @@ -4541,9 +4673,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" -version = "2.0.101" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce2b7fc941b3a24138a0a7cf8e858bfc6a992e7978a068a5c760deb0ed43caf" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", @@ -4674,12 +4806,11 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.8" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +checksum = "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" dependencies = [ "cfg-if", - "once_cell", ] [[package]] @@ -4801,9 +4932,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.0" +version = "1.45.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165" +checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" dependencies = [ "backtrace", "bytes", @@ -4888,9 +5019,9 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.22" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ae329d1f08c4d17a59bed7ff5b5a769d062e64a62d34a3261b219e62cd5aae" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", @@ -4900,18 +5031,18 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.9" +version = "0.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3da5db5a963e24bc68be8b17b6fa82814bb22ee8660f192bb182771d498f09a3" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" dependencies = [ "serde", ] [[package]] name = "toml_edit" -version = "0.22.26" +version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310068873db2c5b3e7659d2cc35d21855dbafa50d1ce336397c666e3cb08137e" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ "indexmap 2.9.0", "serde", @@ -4923,9 +5054,9 @@ dependencies = [ [[package]] name = "toml_write" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfb942dfe1d8e29a7ee7fcbde5bd2b9a25fb89aa70caea2eba3bee836ff41076" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" [[package]] name = "tonic" @@ -4994,12 +5125,12 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.4" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fdb0c213ca27a9f57ab69ddb290fd80d970922355b83ae380b395d3986b8a2e" +checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" dependencies = [ "async-compression", - "bitflags 2.9.0", + "bitflags 2.9.1", "bytes", "futures-core", "futures-util", @@ -5183,9 +5314,9 @@ checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-width" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" +checksum = "4a1a07cc7db3810833284e8d372ccdc6da29741639ecc70c9ec107df0fa6154c" [[package]] name = "unsafe-libyaml" @@ -5211,7 +5342,7 @@ dependencies = [ "rustls", "rustls-pki-types", "url", - "webpki-roots", + "webpki-roots 0.26.11", ] [[package]] @@ -5246,19 +5377,21 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" 
[[package]] name = "uuid" -version = "1.16.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "3cf4199d1e5d15ddd86a694e4d0dffa9c323ce759fea589f00fef9d81cc1931d" dependencies = [ "getrandom 0.3.3", + "js-sys", "serde", + "wasm-bindgen", ] [[package]] name = "v_frame" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6f32aaa24bacd11e488aa9ba66369c7cd514885742c9fe08cfe85884db3e92b" +checksum = "666b7727c8875d6ab5db9533418d7c764233ac9c0cff1d469aec8fa127597be2" dependencies = [ "aligned-vec", "num-traits", @@ -5300,9 +5433,9 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" @@ -5428,18 +5561,27 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.8" +version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2210b291f7ea53617fbafcc4939f10914214ec15aace5ba62293a668f322c5c9" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.1", +] + +[[package]] +name = "webpki-roots" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8782dd5a41a24eed3a4f40b606249b3e236ca61adf1f25ea4d45c73de122b502" dependencies = [ "rustls-pki-types", ] [[package]] name = "weezl" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a85b86a771b1c87058196170769dd264f66c0782acf1ae6cc51bfd64b39082" +checksum = 
"a751b3277700db47d3e574514de2eced5e54dc8a5436a3bf7a0b248b2cee16f3" [[package]] name = "which" @@ -5450,7 +5592,7 @@ dependencies = [ "either", "home", "once_cell", - "rustix", + "rustix 0.38.44", ] [[package]] @@ -5489,32 +5631,55 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.58.0" +version = "0.61.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd04d41d93c4992d421894c18c8b43496aa748dd4c081bac0dc93eb0489272b6" +checksum = "9babd3a767a4c1aef6900409f85f5d53ce2544ccdfaa86dad48c91782c6d6893" +dependencies = [ + "windows-collections", + "windows-core", + "windows-future", + "windows-link", + "windows-numerics", +] + +[[package]] +name = "windows-collections" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8" dependencies = [ "windows-core", - "windows-targets 0.52.6", ] [[package]] name = "windows-core" -version = "0.58.0" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ba6d44ec8c2591c134257ce647b7ea6b20335bf6379a27dac5f1641fcf59f99" +checksum = "c0fdd3ddb90610c7638aa2b3a3ab2904fb9e5cdbecc643ddb3647212781c4ae3" dependencies = [ "windows-implement", "windows-interface", - "windows-result 0.2.0", - "windows-strings 0.1.0", - "windows-targets 0.52.6", + "windows-link", + "windows-result", + "windows-strings 0.4.2", +] + +[[package]] +name = "windows-future" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc6a41e98427b19fe4b73c550f060b59fa592d7d686537eebf9385621bfbad8e" +dependencies = [ + "windows-core", + "windows-link", + "windows-threading", ] [[package]] name = "windows-implement" -version = "0.58.0" +version = "0.60.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bbd5b46c938e506ecbce286b6628a02171d56153ba733b6c741fc627ec9579b" +checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" dependencies = [ "proc-macro2", "quote", @@ -5523,9 +5688,9 @@ dependencies = [ [[package]] name = "windows-interface" -version = "0.58.0" +version = "0.59.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053c4c462dc91d3b1504c6fe5a726dd15e216ba718e84a0e46a88fbe5ded3515" +checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" dependencies = [ "proc-macro2", "quote", @@ -5534,9 +5699,19 @@ dependencies = [ [[package]] name = "windows-link" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" + +[[package]] +name = "windows-numerics" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1" +dependencies = [ + "windows-core", + "windows-link", +] [[package]] name = "windows-registry" @@ -5544,39 +5719,20 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4286ad90ddb45071efd1a66dfa43eb02dd0dfbae1545ad6cc3c51cf34d7e8ba3" dependencies = [ - "windows-result 0.3.2", + "windows-result", "windows-strings 0.3.1", - "windows-targets 0.53.0", + "windows-targets 0.53.2", ] [[package]] name = "windows-result" -version = "0.2.0" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" -dependencies = [ - "windows-targets 0.52.6", -] - -[[package]] -name = "windows-result" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "56f42bd332cc6c8eac5af113fc0c1fd6a8fd2aa08a0119358686e5160d0586c6" dependencies = [ "windows-link", ] -[[package]] -name = "windows-strings" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" -dependencies = [ - "windows-result 0.2.0", - "windows-targets 0.52.6", -] - [[package]] name = "windows-strings" version = "0.3.1" @@ -5586,6 +5742,15 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-strings" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56e6c93f3a0c3b36176cb1327a4958a0353d5d166c2a35cb268ace15e91d3b57" +dependencies = [ + "windows-link", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -5613,6 +5778,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.2", +] + [[package]] name = "windows-targets" version = "0.48.5" @@ -5646,9 +5820,9 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.0" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b" +checksum = "c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" dependencies = [ "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", @@ -5660,6 +5834,15 @@ dependencies = [ "windows_x86_64_msvc 0.53.0", ] +[[package]] +name = "windows-threading" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b66463ad2e0ea3bbf808b7f1d371311c80e115c0b71d60efc142cafbcfb057a6" +dependencies = [ + "windows-link", +] + [[package]] 
name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -5800,9 +5983,9 @@ checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" [[package]] name = "winnow" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06928c8748d81b05c9be96aad92e1b6ff01833332f281e8cfca3be4b35fc9ec" +checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd" dependencies = [ "memchr", ] @@ -5823,7 +6006,7 @@ version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.1", ] [[package]] @@ -5875,18 +6058,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1702d9583232ddb9174e01bb7c15a2ab8fb1bc6f227aa1233858c351a3ba0cb" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.25" +version = "0.8.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28a6e20d751156648aa063f3800b706ee209a32c0b4d9f24be3d980b01be55ef" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" dependencies = [ "proc-macro2", "quote", @@ -5999,9 +6182,9 @@ dependencies = [ [[package]] name = "zune-jpeg" -version = "0.4.14" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99a5bab8d7dedf81405c4bb1f2b83ea057643d9cb28778cea9eecddeedd2e028" +checksum = "0f6fe2e33d02a98ee64423802e16df3de99c43e5cf5ff983767e1128b394c8ac" dependencies = [ "zune-core", ] From bae8192fb3cef6dbe6a14015ab46639df14ee288 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 20 Jun 2025 23:39:20 +0100 Subject: [PATCH 010/270] chore: Bump resolv-conf from 0.7.1 to 0.7.4 
--- Cargo.lock | 7 ++----- Cargo.toml | 9 ++++----- 2 files changed, 6 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59662da2..a8bab900 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3788,11 +3788,8 @@ dependencies = [ [[package]] name = "resolv-conf" -version = "0.7.1" -source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=200e958941d522a70c5877e3d846f55b5586c68d#200e958941d522a70c5877e3d846f55b5586c68d" -dependencies = [ - "hostname", -] +version = "0.7.4" +source = "git+https://forgejo.ellis.link/continuwuation/resolv-conf?rev=56251316cc4127bcbf36e68ce5e2093f4d33e227#56251316cc4127bcbf36e68ce5e2093f4d33e227" [[package]] name = "rgb" diff --git a/Cargo.toml b/Cargo.toml index af904447..0ebb758c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -381,7 +381,7 @@ features = [ "unstable-msc4121", "unstable-msc4125", "unstable-msc4186", - "unstable-msc4203", # sending to-device events to appservices + "unstable-msc4203", # sending to-device events to appservices "unstable-msc4210", # remove legacy mentions "unstable-extensible-events", "unstable-pdu", @@ -580,12 +580,11 @@ rev = "9c8e51510c35077df888ee72a36b4b05637147da" git = "https://forgejo.ellis.link/continuwuation/hyper-util" rev = "e4ae7628fe4fcdacef9788c4c8415317a4489941" -# allows no-aaaa option in resolv.conf -# bumps rust edition and toolchain to 1.86.0 and 2024 -# use sat_add on line number errors +# Allows no-aaaa option in resolv.conf +# Use 1-indexed line numbers when displaying parse error messages [patch.crates-io.resolv-conf] git = "https://forgejo.ellis.link/continuwuation/resolv-conf" -rev = "200e958941d522a70c5877e3d846f55b5586c68d" +rev = "56251316cc4127bcbf36e68ce5e2093f4d33e227" # # Our crates From 70df8364b38137011b540cf39821967c91c41efa Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 21 Jun 2025 00:50:02 +0100 Subject: [PATCH 011/270] chore: Bump rustyline-async from 0.4.3 to 0.4.6 --- Cargo.lock | 31 +++++++------------------------ Cargo.toml | 6 
+++--- 2 files changed, 10 insertions(+), 27 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a8bab900..7852d2ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1105,7 +1105,7 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "980c2afde4af43d6a05c5be738f9eae595cff86dce1f38f88b95058a98c027f3" dependencies = [ - "crossterm 0.29.0", + "crossterm", ] [[package]] @@ -1165,7 +1165,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5282b45c96c5978c8723ea83385cb9a488b64b7d175733f48d07bf9da514a863" dependencies = [ "crokey-proc_macros", - "crossterm 0.29.0", + "crossterm", "once_cell", "serde", "strict", @@ -1177,7 +1177,7 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2ea0218d3fedf0797fa55676f1964ef5d27103d41ed0281b4bbd2a6e6c3d8d28" dependencies = [ - "crossterm 0.29.0", + "crossterm", "proc-macro2", "quote", "strict", @@ -1240,23 +1240,6 @@ version = "0.8.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" -[[package]] -name = "crossterm" -version = "0.28.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "829d955a0bb380ef178a640b91779e3987da38c9aea133b20614cfed8cdea9c6" -dependencies = [ - "bitflags 2.9.1", - "crossterm_winapi", - "futures-core", - "mio", - "parking_lot", - "rustix 0.38.44", - "signal-hook", - "signal-hook-mio", - "winapi", -] - [[package]] name = "crossterm" version = "0.29.0" @@ -1267,6 +1250,7 @@ dependencies = [ "crossterm_winapi", "derive_more", "document-features", + "futures-core", "mio", "parking_lot", "rustix 1.0.7", @@ -4153,11 +4137,10 @@ checksum = "8a0d197bd2c9dc6e53b84da9556a69ba4cdfab8619eb41a8bd1cc2027a0f6b1d" [[package]] name = "rustyline-async" -version = "0.4.3" -source = 
"git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=deaeb0694e2083f53d363b648da06e10fc13900c#deaeb0694e2083f53d363b648da06e10fc13900c" +version = "0.4.6" +source = "git+https://forgejo.ellis.link/continuwuation/rustyline-async?rev=e9f01cf8c6605483cb80b3b0309b400940493d7f#e9f01cf8c6605483cb80b3b0309b400940493d7f" dependencies = [ - "crossterm 0.28.1", - "futures-channel", + "crossterm", "futures-util", "pin-project", "thingbuf", diff --git a/Cargo.toml b/Cargo.toml index 0ebb758c..f3de5d7f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -556,11 +556,11 @@ rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" git = "https://forgejo.ellis.link/continuwuation/tracing" rev = "1e64095a8051a1adf0d1faa307f9f030889ec2aa" -# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/de26100b0db03e419a3d8e1dd26895d170d1fe50 -# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/commit/67d8c49aeac03a5ef4e818f663eaa94dd7bf339b +# adds a tab completion callback: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0002-add-tab-completion-callback.patch +# adds event for CTRL+\: https://forgejo.ellis.link/continuwuation/rustyline-async/src/branch/main/.patchy/0001-add-event-for-ctrl.patch [patch.crates-io.rustyline-async] git = "https://forgejo.ellis.link/continuwuation/rustyline-async" -rev = "deaeb0694e2083f53d363b648da06e10fc13900c" +rev = "e9f01cf8c6605483cb80b3b0309b400940493d7f" # adds LIFO queue scheduling; this should be updated with PR progress. 
[patch.crates-io.event-listener] From 93719018a881f7aa2e150c43647565995b5c6b0e Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 21 Jun 2025 17:58:28 +0100 Subject: [PATCH 012/270] ci: Run additional sanity checks on repository --- .editorconfig | 2 +- .forgejo/actions/prefligit/action.yml | 22 +++++++++++ .forgejo/workflows/prefligit-checks.yml | 15 ++++++++ .forgejo/workflows/rust-checks.yml | 2 +- .pre-commit-config.yaml | 50 +++++++++++++++++++++++++ 5 files changed, 89 insertions(+), 2 deletions(-) create mode 100644 .forgejo/actions/prefligit/action.yml create mode 100644 .forgejo/workflows/prefligit-checks.yml create mode 100644 .pre-commit-config.yaml diff --git a/.editorconfig b/.editorconfig index 91f073bd..17bb0c17 100644 --- a/.editorconfig +++ b/.editorconfig @@ -23,6 +23,6 @@ indent_size = 2 indent_style = tab max_line_length = 98 -[{.forgejo/**/*.yml,.github/**/*.yml}] +[{**/*.yml}] indent_size = 2 indent_style = space diff --git a/.forgejo/actions/prefligit/action.yml b/.forgejo/actions/prefligit/action.yml new file mode 100644 index 00000000..10f47af9 --- /dev/null +++ b/.forgejo/actions/prefligit/action.yml @@ -0,0 +1,22 @@ +name: reflighit +description: | + Runs reflighit, pre-commit reimplemented in Rust. 
+inputs: + extra_args: + description: options to pass to pre-commit run + required: false + default: '--all-files' + +runs: + using: composite + steps: + - name: Install Prefligit + shell: bash + run: | + curl --proto '=https' --tlsv1.2 -LsSf https://github.com/j178/prefligit/releases/download/v0.0.10/prefligit-installer.sh | sh + - uses: actions/cache@v3 + with: + path: ~/.cache/prefligit + key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }} + - run: prefligit run --show-diff-on-failure --color=always ${{ inputs.extra_args }} + shell: bash diff --git a/.forgejo/workflows/prefligit-checks.yml b/.forgejo/workflows/prefligit-checks.yml new file mode 100644 index 00000000..8eae8451 --- /dev/null +++ b/.forgejo/workflows/prefligit-checks.yml @@ -0,0 +1,15 @@ +name: Checks / Prefligit + +on: + push: + +jobs: + prefligit: + runs-on: ubuntu-latest + env: + FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }} + TO_REF: ${{ github.sha }} + steps: + - uses: ./.forgejo/actions/prefligit + with: + extra_args: --from-ref ${{ env.FROM_REF }} --to-ref ${{ env.TO_REF }} --hook-stage manual diff --git a/.forgejo/workflows/rust-checks.yml b/.forgejo/workflows/rust-checks.yml index 35ca1ad7..105efd0f 100644 --- a/.forgejo/workflows/rust-checks.yml +++ b/.forgejo/workflows/rust-checks.yml @@ -1,4 +1,4 @@ -name: Rust Checks +name: Checks / Rust on: push: diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..fc0f9d71 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,50 @@ +default_install_hook_types: + - pre-commit + - commit-msg +default_stages: + - pre-commit + - manual + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v5.0.0 + hooks: + - id: check-byte-order-marker + - id: check-case-conflict + - id: check-symlinks + - id: 
destroyed-symlinks + - id: check-yaml + - id: check-json + - id: check-toml + - id: end-of-file-fixer + - id: trailing-whitespace + - id: mixed-line-ending + - id: check-merge-conflict + - id: check-added-large-files + + - repo: https://github.com/crate-ci/typos + rev: v1.26.0 + hooks: + - id: typos + + - repo: local + hooks: + - id: cargo-fmt + name: cargo fmt + entry: cargo +nightly fmt -- + language: system + types: [rust] + pass_filenames: false + stages: + - pre-commit + + - repo: local + hooks: + - id: cargo-clippy + name: cargo clippy + language: system + types: [rust] + pass_filenames: false + entry: cargo clippy --workspace --locked --no-deps --profile test -- -D warnings + stages: + - pre-commit From 46c193e74b2ce86c48ce802333a0aabce37fd6e9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 21 Jun 2025 17:59:01 +0100 Subject: [PATCH 013/270] chore: fix end of files & trailing whitespace --- .gitattributes | 2 +- docs/deploying/arch-linux.md | 2 +- docs/deploying/docker-compose.override.yml | 1 - docs/deploying/docker-compose.with-caddy.yml | 2 +- docs/deploying/docker-compose.with-traefik.yml | 2 +- docs/deploying/nixos.md | 2 +- docs/static/_headers | 2 +- docs/static/announcements.schema.json | 2 +- docs/static/support | 2 +- theme/css/chrome.css | 1 - 10 files changed, 8 insertions(+), 10 deletions(-) diff --git a/.gitattributes b/.gitattributes index 3dfaca65..a1a845b6 100644 --- a/.gitattributes +++ b/.gitattributes @@ -84,4 +84,4 @@ Cargo.lock text *.zst binary # Text files where line endings should be preserved -*.patch -text \ No newline at end of file +*.patch -text diff --git a/docs/deploying/arch-linux.md b/docs/deploying/arch-linux.md index 52d2afb2..6e50410d 100644 --- a/docs/deploying/arch-linux.md +++ b/docs/deploying/arch-linux.md @@ -2,4 +2,4 @@ Continuwuity is available on the `archlinuxcn` repository and AUR, with the same package name `continuwuity`, which includes latest taggged version. 
The development version is available on AUR as `continuwuity-git` -Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable/start the continuwuity.service. \ No newline at end of file +Simply install the `continuwuity` package. Configure the service in `/etc/conduwuit/conduwuit.toml`, then enable/start the continuwuity.service. diff --git a/docs/deploying/docker-compose.override.yml b/docs/deploying/docker-compose.override.yml index 168b1ae6..c1a7248c 100644 --- a/docs/deploying/docker-compose.override.yml +++ b/docs/deploying/docker-compose.override.yml @@ -34,4 +34,3 @@ services: # - "traefik.http.routers.to-element-web.tls.certresolver=letsencrypt" # vim: ts=2:sw=2:expandtab - diff --git a/docs/deploying/docker-compose.with-caddy.yml b/docs/deploying/docker-compose.with-caddy.yml index 3dfc9d85..dd6a778f 100644 --- a/docs/deploying/docker-compose.with-caddy.yml +++ b/docs/deploying/docker-compose.with-caddy.yml @@ -26,7 +26,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/continuwuity - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. #- ./continuwuity.toml:/etc/continuwuity.toml environment: CONTINUWUITY_SERVER_NAME: example.com # EDIT THIS diff --git a/docs/deploying/docker-compose.with-traefik.yml b/docs/deploying/docker-compose.with-traefik.yml index 9acc4221..49b7c905 100644 --- a/docs/deploying/docker-compose.with-traefik.yml +++ b/docs/deploying/docker-compose.with-traefik.yml @@ -8,7 +8,7 @@ services: restart: unless-stopped volumes: - db:/var/lib/continuwuity - - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. + - /etc/resolv.conf:/etc/resolv.conf:ro # Use the host's DNS resolver rather than Docker's. 
#- ./continuwuity.toml:/etc/continuwuity.toml networks: - proxy diff --git a/docs/deploying/nixos.md b/docs/deploying/nixos.md index cf2c09e4..2fdcbe5c 100644 --- a/docs/deploying/nixos.md +++ b/docs/deploying/nixos.md @@ -29,7 +29,7 @@ appropriately to use Continuwuity instead of Conduit. Due to the lack of a Continuwuity NixOS module, when using the `services.matrix-conduit` module a workaround like the one below is necessary to use UNIX sockets. This is because the UNIX -socket option does not exist in Conduit, and the module forcibly sets the `address` and +socket option does not exist in Conduit, and the module forcibly sets the `address` and `port` config options. ```nix diff --git a/docs/static/_headers b/docs/static/_headers index 6e52de9f..dd07f21b 100644 --- a/docs/static/_headers +++ b/docs/static/_headers @@ -3,4 +3,4 @@ Content-Type: application/json /.well-known/continuwuity/* Access-Control-Allow-Origin: * - Content-Type: application/json \ No newline at end of file + Content-Type: application/json diff --git a/docs/static/announcements.schema.json b/docs/static/announcements.schema.json index cacd10c9..474c0d29 100644 --- a/docs/static/announcements.schema.json +++ b/docs/static/announcements.schema.json @@ -32,4 +32,4 @@ "required": [ "announcements" ] - } \ No newline at end of file + } diff --git a/docs/static/support b/docs/static/support index 6b7a9860..88a85c7d 100644 --- a/docs/static/support +++ b/docs/static/support @@ -21,4 +21,4 @@ } ], "support_page": "https://continuwuity.org/introduction#contact" -} \ No newline at end of file +} diff --git a/theme/css/chrome.css b/theme/css/chrome.css index d6cc2b32..f14ffc2c 100644 --- a/theme/css/chrome.css +++ b/theme/css/chrome.css @@ -605,4 +605,3 @@ ul#searchresults span.teaser em { margin-inline-start: -14px; width: 14px; } - From a682e9dbb879340c63772d1978ca8b74eb28840f Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 21 Jun 2025 18:03:38 +0100 Subject: [PATCH 014/270] chore: Add commit to 
ignored revs --- .git-blame-ignore-revs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index 33f738f3..ddfc0568 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -5,3 +5,5 @@ f419c64aca300a338096b4e0db4c73ace54f23d0 # use chain_width 60 162948313c212193965dece50b816ef0903172ba 5998a0d883d31b866f7c8c46433a8857eae51a89 +# trailing whitespace and newlines +46c193e74b2ce86c48ce802333a0aabce37fd6e9 From 2ecbd75d64a6f48a8baaf61145d684ff880ea986 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 21 Jun 2025 18:20:04 +0100 Subject: [PATCH 015/270] ci: fixes - Install UV - Verbose run - Set permissions explicitly - Check all files --- .editorconfig | 2 +- .forgejo/actions/prefligit/action.yml | 11 ++++++++--- .forgejo/workflows/prefligit-checks.yml | 9 ++++++++- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.editorconfig b/.editorconfig index 17bb0c17..3e7fd1b8 100644 --- a/.editorconfig +++ b/.editorconfig @@ -23,6 +23,6 @@ indent_size = 2 indent_style = tab max_line_length = 98 -[{**/*.yml}] +[*.yml] indent_size = 2 indent_style = space diff --git a/.forgejo/actions/prefligit/action.yml b/.forgejo/actions/prefligit/action.yml index 10f47af9..8cbd4500 100644 --- a/.forgejo/actions/prefligit/action.yml +++ b/.forgejo/actions/prefligit/action.yml @@ -1,6 +1,6 @@ -name: reflighit +name: prefligit description: | - Runs reflighit, pre-commit reimplemented in Rust. + Runs prefligit, pre-commit reimplemented in Rust. 
inputs: extra_args: description: options to pass to pre-commit run @@ -10,6 +10,11 @@ inputs: runs: using: composite steps: + - name: Install uv + uses: https://github.com/astral-sh/setup-uv@v6 + with: + enable-cache: true + ignore-nothing-to-cache: true - name: Install Prefligit shell: bash run: | @@ -18,5 +23,5 @@ runs: with: path: ~/.cache/prefligit key: prefligit-0|${{ hashFiles('.pre-commit-config.yaml') }} - - run: prefligit run --show-diff-on-failure --color=always ${{ inputs.extra_args }} + - run: prefligit run --show-diff-on-failure --color=always -v ${{ inputs.extra_args }} shell: bash diff --git a/.forgejo/workflows/prefligit-checks.yml b/.forgejo/workflows/prefligit-checks.yml index 8eae8451..cc512496 100644 --- a/.forgejo/workflows/prefligit-checks.yml +++ b/.forgejo/workflows/prefligit-checks.yml @@ -2,6 +2,9 @@ name: Checks / Prefligit on: push: + pull_request: +permissions: + contents: read jobs: prefligit: @@ -10,6 +13,10 @@ jobs: FROM_REF: ${{ github.event.pull_request.base.sha || (!github.event.forced && ( github.event.before != '0000000000000000000000000000000000000000' && github.event.before || github.sha )) || format('{0}~', github.sha) }} TO_REF: ${{ github.sha }} steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false - uses: ./.forgejo/actions/prefligit with: - extra_args: --from-ref ${{ env.FROM_REF }} --to-ref ${{ env.TO_REF }} --hook-stage manual + extra_args: --all-files --hook-stage manual From 4f174324baadfd154a48d9e6e79b20ca63770854 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 23 Jun 2025 01:04:27 +0100 Subject: [PATCH 016/270] docs: Update contributing guide --- CONTRIBUTING.md | 92 ++++++++++++++++++++------------------------- docs/development.md | 76 ++++++++++++++++++++++--------------- 2 files changed, 86 insertions(+), 82 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index da426801..c57de8b4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,6 +1,6 @@ # 
Contributing guide -This page is for about contributing to Continuwuity. The +This page is about contributing to Continuwuity. The [development](./development.md) page may be of interest for you as well. If you would like to work on an [issue][issues] that is not assigned, preferably @@ -10,7 +10,7 @@ and comment on it. ### Linting and Formatting It is mandatory all your changes satisfy the lints (clippy, rustc, rustdoc, etc) -and your code is formatted via the **nightly** `cargo fmt`. A lot of the +and your code is formatted via the **nightly** rustfmt (`cargo +nightly fmt`). A lot of the `rustfmt.toml` features depend on nightly toolchain. It would be ideal if they weren't nightly-exclusive features, but they currently still are. CI's rustfmt uses nightly. @@ -21,67 +21,62 @@ comment saying why. Do not write inefficient code for the sake of satisfying lints. If a lint is wrong and provides a more inefficient solution or suggestion, allow the lint and mention that in a comment. -### Running CI tests locally +### Running tests locally -continuwuity's CI for tests, linting, formatting, audit, etc use -[`engage`][engage]. engage can be installed from nixpkgs or `cargo install -engage`. continuwuity's Nix flake devshell has the nixpkgs engage with `direnv`. -Use `engage --help` for more usage details. +Tests, compilation, and linting can be run with standard Cargo commands: -To test, format, lint, etc that CI would do, install engage, allow the `.envrc` -file using `direnv allow`, and run `engage`. +```bash +# Run tests +cargo test -All of the tasks are defined at the [engage.toml][engage.toml] file. 
You can -view all of them neatly by running `engage list` +# Check compilation +cargo check --workspace -If you would like to run only a specific engage task group, use `just`: +# Run lints +cargo clippy --workspace +# Auto-fix: cargo clippy --workspace --fix --allow-staged; -- `engage just ` -- Example: `engage just lints` +# Format code (must use nightly) +cargo +nightly fmt +``` -If you would like to run a specific engage task in a specific group, use `just - [TASK]`: `engage just lints cargo-fmt` +### Building Docker images -The following binaries are used in [`engage.toml`][engage.toml]: +Docker images can be built using the standard Docker build command: -- [`engage`][engage] -- `nix` -- [`direnv`][direnv] -- `rustc` -- `cargo` -- `cargo-fmt` -- `rustdoc` -- `cargo-clippy` -- [`cargo-audit`][cargo-audit] -- [`cargo-deb`][cargo-deb] -- [`lychee`][lychee] -- [`markdownlint-cli`][markdownlint-cli] -- `dpkg` +```bash +docker build -f docker/Dockerfile . +``` + +The Docker image can be cross-compiled for different architectures if needed. ### Matrix tests -CI runs [Complement][complement], but currently does not fail if results from -the checked-in results differ with the new results. If your changes are done to -fix Matrix tests, note that in your pull request. If more Complement tests start -failing from your changes, please review the logs (they are uploaded as -artifacts) and determine if they're intended or not. +Continuwuity uses [Complement][complement] for Matrix protocol compliance testing. Complement tests are run manually by developers, and documentation on how to run these tests locally is currently being developed. -If you'd like to run Complement locally using Nix, see the -[testing](development/testing.md) page. +If your changes are done to fix Matrix tests, please note that in your pull request. If more Complement tests start failing from your changes, please review the logs and determine if they're intended or not. 
-[Sytest][sytest] support will come soon. +[Sytest][sytest] is currently unsupported. ### Writing documentation -Continuwuity's website uses [`mdbook`][mdbook] and deployed via CI using GitHub -Pages in the [`documentation.yml`][documentation.yml] workflow file with Nix's -mdbook in the devshell. All documentation is in the `docs/` directory at the top -level. The compiled mdbook website is also uploaded as an artifact. +Continuwuity's website uses [`mdbook`][mdbook] and is deployed via CI using Cloudflare Pages +in the [`documentation.yml`][documentation.yml] workflow file. All documentation is in the `docs/` +directory at the top level. -To build the documentation using Nix, run: `bin/nix-build-and-cache just .#book` +To build the documentation locally: -The output of the mdbook generation is in `result/`. mdbooks can be opened in -your browser from the individual HTML files without any web server needed. +1. Install mdbook if you don't have it already: + ```bash + cargo install mdbook # or cargo binstall, or another method + ``` + +2. Build the documentation: + ```bash + mdbook build + ``` + +The output of the mdbook generation is in `public/`. You can open the HTML files directly in your browser without needing a web server. ### Inclusivity and Diversity @@ -132,13 +127,6 @@ continuwuityMatrix rooms for Code of Conduct violations. 
[issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues [continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org [complement]: https://github.com/matrix-org/complement/ -[engage.toml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/engage.toml -[engage]: https://charles.page.computer.surgery/engage/ [sytest]: https://github.com/matrix-org/sytest/ -[cargo-deb]: https://github.com/kornelski/cargo-deb -[lychee]: https://github.com/lycheeverse/lychee -[markdownlint-cli]: https://github.com/igorshubovych/markdownlint-cli -[cargo-audit]: https://github.com/RustSec/rustsec/tree/main/cargo-audit -[direnv]: https://direnv.net/ [mdbook]: https://rust-lang.github.io/mdBook/ [documentation.yml]: https://forgejo.ellis.link/continuwuation/continuwuity/src/branch/main/.forgejo/workflows/documentation.yml diff --git a/docs/development.md b/docs/development.md index 1e344f41..dfbddea0 100644 --- a/docs/development.md +++ b/docs/development.md @@ -68,31 +68,22 @@ do this if Rust supported workspace-level features to begin with. ## List of forked dependencies -During Continuwuity development, we have had to fork -some dependencies to support our use-cases in some areas. This ranges from -things said upstream project won't accept for any reason, faster-paced -development (unresponsive or slow upstream), Continuwuity-specific usecases, or -lack of time to upstream some things. +During Continuwuity (and prior projects) development, we have had to fork some dependencies to support our use-cases. +These forks exist for various reasons including features that upstream projects won't accept, +faster-paced development, Continuwuity-specific usecases, or lack of time to upstream changes. 
-- [ruma/ruma][1]: - various performance -improvements, more features, faster-paced development, better client/server interop -hacks upstream won't accept, etc -- [facebook/rocksdb][2]: - liburing -build fixes and GCC debug build fix -- [tikv/jemallocator][3]: - musl -builds seem to be broken on upstream, fixes some broken/suspicious code in -places, additional safety measures, and support redzones for Valgrind -- [zyansheep/rustyline-async][4]: - - tab completion callback and -`CTRL+\` signal quit event for Continuwuity console CLI -- [rust-rocksdb/rust-rocksdb][5]: - - [`@zaidoon1`][8]'s fork -has quicker updates, more up to date dependencies, etc. Our fork fixes musl build -issues, removes unnecessary `gtest` include, and uses our RocksDB and jemallocator -forks. -- [tokio-rs/tracing][6]: - Implements -`Clone` for `EnvFilter` to support dynamically changing tracing envfilter's -alongside other logging/metrics things +All forked dependencies are maintained under the [continuwuation organization on Forgejo](https://forgejo.ellis.link/continuwuation): + +- [ruwuma][continuwuation-ruwuma] - Fork of [ruma/ruma][ruma] with various performance improvements, more features and better client/server interop +- [rocksdb][continuwuation-rocksdb] - Fork of [facebook/rocksdb][rocksdb] via [`@zaidoon1`][8] with liburing build fixes and GCC debug build fixes +- [jemallocator][continuwuation-jemallocator] - Fork of [tikv/jemallocator][jemallocator] fixing musl builds, suspicious code, + and adding support for redzones in Valgrind +- [rustyline-async][continuwuation-rustyline-async] - Fork of [zyansheep/rustyline-async][rustyline-async] with tab completion callback + and `CTRL+\` signal quit event for Continuwuity console CLI +- [rust-rocksdb][continuwuation-rust-rocksdb] - Fork of [rust-rocksdb/rust-rocksdb][rust-rocksdb] fixing musl build issues, + removing unnecessary `gtest` include, and using our RocksDB and jemallocator forks +- [tracing][continuwuation-tracing] - Fork of 
[tokio-rs/tracing][tracing] implementing `Clone` for `EnvFilter` to + support dynamically changing tracing environments ## Debugging with `tokio-console` @@ -113,12 +104,37 @@ You will also need to enable the `tokio_console` config option in Continuwuity w starting it. This was due to tokio-console causing gradual memory leak/usage if left enabled. -[1]: https://github.com/ruma/ruma/ -[2]: https://github.com/facebook/rocksdb/ -[3]: https://github.com/tikv/jemallocator/ -[4]: https://github.com/zyansheep/rustyline-async/ -[5]: https://github.com/rust-rocksdb/rust-rocksdb/ -[6]: https://github.com/tokio-rs/tracing/ +## Building Docker Images + +To build a Docker image for Continuwuity, use the standard Docker build command: + +```bash +docker build -f docker/Dockerfile . +``` + +The image can be cross-compiled for different architectures. + +## Matrix Protocol Compliance Testing + +Complement (the Matrix protocol compliance testing suite) is run manually by developers. +Documentation on how to run Complement tests locally is being developed and will be added soon. + +Sytest is currently unsupported. 
+ +[continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma +[continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb +[continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator +[continuwuation-rustyline-async]: https://forgejo.ellis.link/continuwuation/rustyline-async +[continuwuation-rust-rocksdb]: https://forgejo.ellis.link/continuwuation/rust-rocksdb +[continuwuation-tracing]: https://forgejo.ellis.link/continuwuation/tracing + +[ruma]: https://github.com/ruma/ruma/ +[rocksdb]: https://github.com/facebook/rocksdb/ +[jemallocator]: https://github.com/tikv/jemallocator/ +[rustyline-async]: https://github.com/zyansheep/rustyline-async/ +[rust-rocksdb]: https://github.com/rust-rocksdb/rust-rocksdb/ +[tracing]: https://github.com/tokio-rs/tracing/ + [7]: https://docs.rs/tokio-console/latest/tokio_console/ [8]: https://github.com/zaidoon1/ [9]: https://github.com/rust-lang/cargo/issues/12162 From 4d69a1ad511b1fc81a668ef319fb1e053788dfc2 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Mon, 23 Jun 2025 01:25:38 +0100 Subject: [PATCH 017/270] docs: Deduplicate sections --- CONTRIBUTING.md | 14 ++------------ docs/development.md | 7 ------- 2 files changed, 2 insertions(+), 19 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c57de8b4..3dc99dc2 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -40,16 +40,6 @@ cargo clippy --workspace cargo +nightly fmt ``` -### Building Docker images - -Docker images can be built using the standard Docker build command: - -```bash -docker build -f docker/Dockerfile . -``` - -The Docker image can be cross-compiled for different architectures if needed. - ### Matrix tests Continuwuity uses [Complement][complement] for Matrix protocol compliance testing. Complement tests are run manually by developers, and documentation on how to run these tests locally is currently being developed. 
@@ -120,9 +110,9 @@ By sending a pull request or patch, you are agreeing that your changes are allowed to be licenced under the Apache-2.0 licence and all of your conduct is in line with the Contributor's Covenant, and continuwuity's Code of Conduct. -Contribution by users who violate either of these code of conducts will not have +Contribution by users who violate either of these code of conducts may not have their contributions accepted. This includes users who have been banned from -continuwuityMatrix rooms for Code of Conduct violations. +continuwuity Matrix rooms for Code of Conduct violations. [issues]: https://forgejo.ellis.link/continuwuation/continuwuity/issues [continuwuity-matrix]: https://matrix.to/#/#continuwuity:continuwuity.org diff --git a/docs/development.md b/docs/development.md index dfbddea0..68b963c8 100644 --- a/docs/development.md +++ b/docs/development.md @@ -114,13 +114,6 @@ docker build -f docker/Dockerfile . The image can be cross-compiled for different architectures. -## Matrix Protocol Compliance Testing - -Complement (the Matrix protocol compliance testing suite) is run manually by developers. -Documentation on how to run Complement tests locally is being developed and will be added soon. - -Sytest is currently unsupported. 
- [continuwuation-ruwuma]: https://forgejo.ellis.link/continuwuation/ruwuma [continuwuation-rocksdb]: https://forgejo.ellis.link/continuwuation/rocksdb [continuwuation-jemallocator]: https://forgejo.ellis.link/continuwuation/jemallocator From 4a289a9feedc720dd17a3af08d7dc7d4b255e55f Mon Sep 17 00:00:00 2001 From: Kimiblock Moe Date: Tue, 24 Jun 2025 19:01:21 +0800 Subject: [PATCH 018/270] arch systemd: use credentials to load config --- arch/conduwuit.service | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/conduwuit.service b/arch/conduwuit.service index c86e37bd..d5a65e4d 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -6,6 +6,7 @@ After=network-online.target Documentation=https://continuwuity.org/ RequiresMountsFor=/var/lib/private/conduwuit Alias=matrix-conduwuit.service + [Service] DynamicUser=yes Type=notify-reload @@ -59,7 +60,8 @@ StateDirectory=conduwuit RuntimeDirectory=conduwuit RuntimeDirectoryMode=0750 -Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment=CONTINUWUITY_CONFIG=${CREDENTIALS_DIRECTORY}/config.toml +LoadCredential=config.toml:/etc/conduwuit/conduwuit.toml BindPaths=/var/lib/private/conduwuit:/var/lib/matrix-conduit BindPaths=/var/lib/private/conduwuit:/var/lib/private/matrix-conduit From 3177545a6f966c19a2f219a3641b9fa5c8bfdb38 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 21:45:54 +0100 Subject: [PATCH 019/270] chore: Remove clippy pre-commit hook It's too slow for a good git experience --- .pre-commit-config.yaml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fc0f9d71..22bcd09c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -37,14 +37,3 @@ repos: pass_filenames: false stages: - pre-commit - - - repo: local - hooks: - - id: cargo-clippy - name: cargo clippy - language: system - types: [rust] - pass_filenames: false - entry: cargo clippy --workspace --locked 
--no-deps --profile test -- -D warnings - stages: - - pre-commit From 9bbe333082cc74926609a6b7c92eb76de877c452 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 21:48:33 +0100 Subject: [PATCH 020/270] ci: Don't run docs flow when the secret is inaccessible --- .forgejo/workflows/documentation.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.forgejo/workflows/documentation.yml b/.forgejo/workflows/documentation.yml index 7d95a317..4f3e903c 100644 --- a/.forgejo/workflows/documentation.yml +++ b/.forgejo/workflows/documentation.yml @@ -17,6 +17,7 @@ jobs: docs: name: Build and Deploy Documentation runs-on: ubuntu-latest + if: secrets.CLOUDFLARE_API_TOKEN != '' steps: - name: Sync repository From eb75c4ecb0b5ac0cec38df85c634323103a28f6d Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 22:05:52 +0100 Subject: [PATCH 021/270] chore: Fix typos in commit messages automatically --- .pre-commit-config.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 22bcd09c..facebcc4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,9 @@ repos: rev: v1.26.0 hooks: - id: typos + - id: typos + name: commit-msg-typos + stages: [commit-msg] - repo: local hooks: From b787e97dc173da7a6673df9f5ea628a60340a497 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 22:22:13 +0100 Subject: [PATCH 022/270] chore: Document & enforce conventional commit messages --- .pre-commit-config.yaml | 5 +++ CONTRIBUTING.md | 80 +++++++++++++++++++++++++++++++++++++++++ committed.toml | 2 ++ 3 files changed, 87 insertions(+) create mode 100644 committed.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index facebcc4..68e3a982 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -30,6 +30,11 @@ repos: name: commit-msg-typos stages: [commit-msg] + - repo: https://github.com/crate-ci/committed + rev: v1.1.7 + hooks: + - id: committed + - 
repo: local hooks: - id: cargo-fmt diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3dc99dc2..1c091183 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,45 @@ comment saying why. Do not write inefficient code for the sake of satisfying lints. If a lint is wrong and provides a more inefficient solution or suggestion, allow the lint and mention that in a comment. +### Pre-commit Checks + +Continuwuity uses pre-commit hooks to enforce various coding standards and catch common issues before they're committed. These checks include: + +- Code formatting and linting +- Typo detection (both in code and commit messages) +- Checking for large files +- Ensuring proper line endings and no trailing whitespace +- Validating YAML, JSON, and TOML files +- Checking for merge conflicts + +You can run these checks locally by installing [prefligit](https://github.com/j178/prefligit): + + +```bash +# Install prefligit using cargo-binstall +cargo binstall prefligit + +# Install git hooks to run checks automatically +prefligit install + +# Run all checks +prefligit --all-files +``` + +Alternatively, you can use [pre-commit](https://pre-commit.com/): +```bash +# Install pre-commit +pip install pre-commit + +# Install the hooks +pre-commit install + +# Run all checks manually +pre-commit run --all-files +``` + +These same checks are run in CI via the prefligit-checks workflow to ensure consistency. + ### Running tests locally Tests, compilation, and linting can be run with standard Cargo commands: @@ -94,6 +133,40 @@ Rust's default style and standards with regards to [function names, variable names, comments](https://rust-lang.github.io/api-guidelines/naming.html), etc applies here. +### Commit Messages + +Continuwuity follows the [Conventional Commits](https://www.conventionalcommits.org/) specification for commit messages. This provides a standardized format that makes the commit history more readable and enables automated tools to generate changelogs. 
+ +The basic structure is: +``` +[(optional scope)]: + +[optional body] + +[optional footer(s)] +``` + +The allowed types for commits are: +- `fix`: Bug fixes +- `feat`: New features +- `docs`: Documentation changes +- `style`: Changes that don't affect the meaning of the code (formatting, etc.) +- `refactor`: Code changes that neither fix bugs nor add features +- `perf`: Performance improvements +- `test`: Adding or fixing tests +- `build`: Changes to the build system or dependencies +- `ci`: Changes to CI configuration +- `chore`: Other changes that don't modify source or test files + +Examples: +``` +feat: add user authentication +fix(database): resolve connection pooling issue +docs: update installation instructions +``` + +The project uses the `committed` hook to validate commit messages in pre-commit. This ensures all commits follow the conventional format. + ### Creating pull requests Please try to keep contributions to the Forgejo Instance. While the mirrors of continuwuity @@ -103,6 +176,13 @@ This prevents us from having to ping once in a while to double check the status of it, especially when the CI completed successfully and everything so it *looks* done. +Before submitting a pull request, please ensure: +1. Your code passes all CI checks (formatting, linting, typo detection, etc.) +2. Your commit messages follow the conventional commits format +3. Tests are added for new functionality +4. Documentation is updated if needed + + Direct all PRs/MRs to the `main` branch. 
diff --git a/committed.toml b/committed.toml new file mode 100644 index 00000000..64f7f18a --- /dev/null +++ b/committed.toml @@ -0,0 +1,2 @@ +style = "conventional" +allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"] From a24278dc1b59b6d20b985e128b515257de1e5143 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 23:12:09 +0100 Subject: [PATCH 023/270] docs: Update mirror badges --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e3eb807f..e71178dc 100644 --- a/README.md +++ b/README.md @@ -11,11 +11,13 @@ It's a community continuation of the [conduwuit](https://github.com/girlbossceo/ -[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) ![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open) +[![forgejo.ellis.link](https://img.shields.io/badge/Ellis%20Git-main+packages-green?style=flat&logo=forgejo&labelColor=fff)](https://forgejo.ellis.link/continuwuation/continuwuity) [![Stars](https://forgejo.ellis.link/continuwuation/continuwuity/badges/stars.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/stars) [![Issues](https://forgejo.ellis.link/continuwuation/continuwuity/badges/issues/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/issues?state=open) [![Pull 
Requests](https://forgejo.ellis.link/continuwuation/continuwuity/badges/pulls/open.svg?style=flat)](https://forgejo.ellis.link/continuwuation/continuwuity/pulls?state=open) -[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) ![](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat) +[![GitHub](https://img.shields.io/badge/GitHub-mirror-blue?style=flat&logo=github&labelColor=fff&logoColor=24292f)](https://github.com/continuwuity/continuwuity) [![Stars](https://img.shields.io/github/stars/continuwuity/continuwuity?style=flat)](https://github.com/continuwuity/continuwuity/stargazers) -[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/nexy7574/continuwuity) ![](https://codeberg.org/nexy7574/continuwuity/badges/stars.svg?style=flat) +[![GitLab](https://img.shields.io/badge/GitLab-mirror-blue?style=flat&logo=gitlab&labelColor=fff)](https://gitlab.com/continuwuity/continuwuity) [![Stars](https://img.shields.io/gitlab/stars/continuwuity/continuwuity?style=flat)](https://gitlab.com/continuwuity/continuwuity/-/starrers) + +[![Codeberg](https://img.shields.io/badge/Codeberg-mirror-2185D0?style=flat&logo=codeberg&labelColor=fff)](https://codeberg.org/continuwuity/continuwuity) [![Stars](https://codeberg.org/continuwuity/continuwuity/badges/stars.svg?style=flat)](https://codeberg.org/continuwuity/continuwuity/stars) ### Why does this exist? From 63962fc04098802abdf3334f147f25ada0f1d36c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 23:13:28 +0100 Subject: [PATCH 024/270] docs: Remove completed items from the README --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index e71178dc..506a62db 100644 --- a/README.md +++ b/README.md @@ -61,8 +61,6 @@ There are currently no open registration Continuwuity instances available. 
We're working our way through all of the issues in the [Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues). -- [Replacing old conduwuit links with working continuwuity links](https://forgejo.ellis.link/continuwuation/continuwuity/issues/742) -- [Getting CI and docs deployment working on the new Forgejo project](https://forgejo.ellis.link/continuwuation/continuwuity/issues/740) - [Packaging & availability in more places](https://forgejo.ellis.link/continuwuation/continuwuity/issues/747) - [Appservices bugs & features](https://forgejo.ellis.link/continuwuation/continuwuity/issues?q=&type=all&state=open&labels=178&milestone=0&assignee=0&poster=0) - [Improving compatibility and spec compliance](https://forgejo.ellis.link/continuwuation/continuwuity/issues?labels=119) From f1ca84fcaff3b5a2c6cc4f1889611f7003859a33 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 24 Jun 2025 23:16:48 +0100 Subject: [PATCH 025/270] fix: Correct project brand in admin & OTEL --- src/admin/admin.rs | 2 +- src/main/logging.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 0d636c72..50b9db7c 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -9,7 +9,7 @@ use crate::{ }; #[derive(Debug, Parser)] -#[command(name = "conduwuit", version = conduwuit::version())] +#[command(name = conduwuit_core::name(), version = conduwuit_core::version())] pub(super) enum AdminCommand { #[command(subcommand)] /// - Commands for managing appservices diff --git a/src/main/logging.rs b/src/main/logging.rs index eeeda127..aec50bd4 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -77,7 +77,7 @@ pub(crate) fn init( ); let tracer = opentelemetry_jaeger::new_agent_pipeline() .with_auto_split_batch(true) - .with_service_name("conduwuit") + .with_service_name(conduwuit_core::name()) .install_batch(opentelemetry_sdk::runtime::Tokio) .expect("jaeger agent pipeline"); let telemetry = 
tracing_opentelemetry::layer().with_tracer(tracer); From db58d841aa4c99e1bc573faea3113557cc68589e Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Fri, 25 Apr 2025 20:59:52 -0700 Subject: [PATCH 026/270] fix: Only load children of nested spaces --- src/api/client/space.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/api/client/space.rs b/src/api/client/space.rs index 92768926..23b1e80f 100644 --- a/src/api/client/space.rs +++ b/src/api/client/space.rs @@ -121,7 +121,9 @@ where .map(|(key, val)| (key, val.collect())) .collect(); - if !populate { + if populate { + rooms.push(summary_to_chunk(summary.clone())); + } else { children = children .iter() .rev() @@ -144,10 +146,8 @@ where .collect(); } - if populate { - rooms.push(summary_to_chunk(summary.clone())); - } else if queue.is_empty() && children.is_empty() { - return Err!(Request(InvalidParam("Room IDs in token were not found."))); + if !populate && queue.is_empty() && children.is_empty() { + break; } parents.insert(current_room.clone()); From c82ea24069d58b8e33d00b836ac7d5c126ffb217 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Fri, 27 Jun 2025 18:44:46 +0100 Subject: [PATCH 027/270] docs: Add Matrix chat and space badges to README --- README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/README.md b/README.md index 506a62db..60dcf81d 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ ## A community-driven [Matrix](https://matrix.org/) homeserver in Rust +[![Chat on Matrix](https://img.shields.io/matrix/continuwuity%3Acontinuwuity.org?server_fqdn=matrix.continuwuity.org&fetchMode=summary&logo=matrix)](https://matrix.to/#/#continuwuity:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) [![Join the 
space](https://img.shields.io/matrix/space%3Acontinuwuity.org?server_fqdn=matrix.continuwuity.org&fetchMode=summary&logo=matrix&label=space)](https://matrix.to/#/#space:continuwuity.org?via=continuwuity.org&via=ellis.link&via=explodie.org&via=matrix.org) + + + [continuwuity] is a Matrix homeserver written in Rust. From 543ab27747dbf31b583175dd81d7d3aa3c82df79 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Fri, 27 Jun 2025 20:58:52 +0100 Subject: [PATCH 028/270] fix: Additional sanity checks when creating a PDU Prevents creating events that are most likely catastrophically invalid --- src/service/rooms/timeline/mod.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4b2f3cb2..94d306d1 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -719,6 +719,18 @@ impl Service { ); } } + if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() { + return Err!(Request(Unknown("Event incorrectly had zero prev_events."))); + } + if state_key.is_none() && depth.le(&uint!(2)) { + // The first two events in a room are always m.room.create and m.room.member, + // so any other events with that same depth are illegal. + warn!( + "Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \ + aborting" + ); + return Err!(Request(Unknown("Unsafe depth for non-state event."))); + } let mut pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), From f508e7654c51070e2231b136506c326305f37d3c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 28 Jun 2025 00:29:07 +0000 Subject: [PATCH 029/270] fix: off by one. 
--- src/service/rooms/timeline/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 94d306d1..37963246 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -722,7 +722,7 @@ impl Service { if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() { return Err!(Request(Unknown("Event incorrectly had zero prev_events."))); } - if state_key.is_none() && depth.le(&uint!(2)) { + if state_key.is_none() && depth.lt(&uint!(2)) { // The first two events in a room are always m.room.create and m.room.member, // so any other events with that same depth are illegal. warn!( From 52e042cb06e05a028340e93bf3ff0d2d37d211b5 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Mon, 26 May 2025 01:22:19 +0100 Subject: [PATCH 030/270] Always calculate state diff IDs in syncv3 seemingly fixes #779 --- src/api/client/sync/v3.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 8eac6b66..7bc74c95 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1009,8 +1009,6 @@ async fn calculate_state_incremental<'a>( ) -> Result { let since_shortstatehash = since_shortstatehash.unwrap_or(current_shortstatehash); - let state_changed = since_shortstatehash != current_shortstatehash; - let encrypted_room = services .rooms .state_accessor @@ -1042,7 +1040,7 @@ async fn calculate_state_incremental<'a>( }) .into(); - let state_diff_ids: OptionFuture<_> = (!full_state && state_changed) + let state_diff_ids: OptionFuture<_> = (!full_state) .then(|| { StreamExt::into_future( services From 9b6ac6c45f44a18d68208038b360212205830a7f Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 19:57:02 +0100 Subject: [PATCH 031/270] fix: Ignore existing membership when room is disconnected --- src/api/client/membership.rs | 32 ++++++++++++++++++++------------ 1 file changed, 
20 insertions(+), 12 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index e587d806..1800fa60 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -925,24 +925,32 @@ pub async fn join_room_by_id_helper( return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); } - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); - return Err!(Request(Forbidden("You are banned from the room."))); - } - } - let server_in_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) .await; + // Only check our known membership if we're already in the room. + // See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855 + let membership = if server_in_room { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + } else { + debug!("Ignoring local state for join {room_id}, we aren't in the room yet."); + Ok(RoomMemberEventContent::new(MembershipState::Leave)) + }; + if let Ok(m) = membership { + if m.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); + // TODO: return reason + return Err!(Request(Forbidden("You are banned from the room."))); + } + } + let local_join = server_in_room || servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); From d63c8b9fcaccb93981b0afcccb6c1c6d823d0f73 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:16:31 +0100 Subject: [PATCH 032/270] feat: Support passing through MSC4293 redact_events --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client/membership.rs | 1 + 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7852d2ca..ab5702e8 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index f3de5d7f..d54b86a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +rev = "0012040617213eccb682b65d0ac76c0e5cc94d8c" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1800fa60..b05e2003 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -646,6 +646,7 @@ pub(crate) async fn ban_user_route( is_direct: None, join_authorized_via_users_server: None, third_party_invite: None, + redact_events: body.redact_events, ..current_member_content }), sender_user, From 4b5e8df95c60435fd3e6bce016229108c0494911 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:29:27 +0100 Subject: 
[PATCH 033/270] fix: Add missing init fields --- src/api/client/membership.rs | 1 + src/api/client/profile.rs | 2 ++ src/api/client/room/upgrade.rs | 1 + 3 files changed, 4 insertions(+) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b05e2003..145b3cde 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1824,6 +1824,7 @@ pub async fn leave_room( displayname: None, third_party_invite: None, blurhash: None, + redact_events: None, }; let is_banned = services.rooms.metadata.is_banned(room_id); diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 3699b590..e2d1c934 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -343,6 +343,7 @@ pub async fn update_displayname( reason: None, is_direct: None, third_party_invite: None, + redact_events: None, }); Ok((pdu, room_id)) @@ -396,6 +397,7 @@ pub async fn update_avatar_url( reason: None, is_direct: None, third_party_invite: None, + redact_events: None, }); Ok((pdu, room_id)) diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 9ec0b3bb..da5b49fe 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -189,6 +189,7 @@ pub(crate) async fn upgrade_room_route( blurhash: services.users.blurhash(sender_user).await.ok(), reason: None, join_authorized_via_users_server: None, + redact_events: None, }) .expect("event is valid, we just created it"), unsigned: None, From b4bdd1ee65dc1c0aa4ff974c13a0237542552fc2 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:43:27 +0100 Subject: [PATCH 034/270] chore: Update ruwuma Fixes the wrong field name being serialised --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab5702e8..92044b92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ 
dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index d54b86a4..5c289adf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "0012040617213eccb682b65d0ac76c0e5cc94d8c" +rev = "9b65f83981f6f53d185ce77da37aaef9dfd764a9" features = [ "compat", "rand", From fac9e090cdebbd21c3b282ed04c61d0b8c86ebdd Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:32:02 +0100 Subject: [PATCH 035/270] feat: Add suspension helper to user service --- src/service/users/mod.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 701561a8..1a9f6600 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -15,6 +15,7 @@ use ruma::{ AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, + uint, }; use serde_json::json; @@ -52,6 +53,7 @@ struct Data { userid_lastonetimekeyupdate: Arc, userid_masterkeyid: Arc, userid_password: Arc, + userid_suspended: 
Arc, userid_selfsigningkeyid: Arc, userid_usersigningkeyid: Arc, useridprofilekey_value: Arc, @@ -87,6 +89,7 @@ impl crate::Service for Service { userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), userid_masterkeyid: args.db["userid_masterkeyid"].clone(), userid_password: args.db["userid_password"].clone(), + userid_suspended: args.db["userid_suspended"].clone(), userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), useridprofilekey_value: args.db["useridprofilekey_value"].clone(), @@ -143,6 +146,16 @@ impl Service { Ok(()) } + /// Suspend account, placing it in a read-only state + pub async fn suspend_account(&self, user_id: &UserId) -> () { + self.db.userid_suspended.insert(user_id, "1"); + } + + /// Unsuspend account, placing it in a read-write state + pub async fn unsuspend_account(&self, user_id: &UserId) -> () { + self.db.userid_suspended.remove(user_id); + } + /// Check if a user has an account on this homeserver. 
#[inline] pub async fn exists(&self, user_id: &UserId) -> bool { @@ -159,6 +172,16 @@ impl Service { .await } + /// Check if account is suspended + pub async fn is_suspended(&self, user_id: &UserId) -> Result { + self.db + .userid_suspended + .get(user_id) + .map_ok(|val| val.is_empty()) + .map_err(|_| err!(Request(NotFound("User does not exist.")))) + .await + } + /// Check if account is active, infallible pub async fn is_active(&self, user_id: &UserId) -> bool { !self.is_deactivated(user_id).await.unwrap_or(true) From accfda258638d7eef9b2c35c15e2b1658a478743 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:35:58 +0100 Subject: [PATCH 036/270] feat: Prevent suspended users sending events --- src/api/client/send.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/api/client/send.rs b/src/api/client/send.rs index f753fa65..b87d1822 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -23,6 +23,9 @@ pub(crate) async fn send_message_event_route( let sender_user = body.sender_user(); let sender_device = body.sender_device.as_deref(); let appservice_info = body.appservice_info.as_ref(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } // Forbid m.room.encrypted if encryption is disabled if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption From 286974cb9a676bb58501835fbdf1fce08b3d9c6c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:37:09 +0100 Subject: [PATCH 037/270] feat: Prevent suspended users redacting events --- src/api/client/redact.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 8dbe47a6..a8eaf91d 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Result, matrix::pdu::PduBuilder}; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; @@ -17,6 +17,10 @@ pub(crate) async fn redact_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; + if services.users.is_suspended(sender_user).await? 
{ + // TODO: Users can redact their own messages while suspended + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; From a6ba9e3045e7cdaccbae006d0037b045c1348016 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:39:24 +0100 Subject: [PATCH 038/270] feat: Prevent suspended users changing their profile --- src/api/client/profile.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index e2d1c934..bdba4078 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -36,6 +36,9 @@ pub(crate) async fn set_displayname_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); @@ -125,6 +128,9 @@ pub(crate) async fn set_avatar_url_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); From a94128e6986f7f05909329de694e547543b4dc36 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:39:57 +0100 Subject: [PATCH 039/270] feat: Prevent suspended users joining/knocking on rooms --- src/api/client/membership.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 145b3cde..d78ebdec 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -178,6 +178,9 @@ pub(crate) async fn join_room_by_id_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } banned_room_check( &services, @@ -249,6 +252,9 @@ pub(crate) async fn join_room_by_id_or_alias_route( let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let appservice_info = &body.appservice_info; let body = body.body; + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { | Ok(room_id) => { @@ -369,6 +375,9 @@ pub(crate) async fn knock_room_route( ) -> Result { let sender_user = body.sender_user(); let body = &body.body; + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { @@ -492,6 +501,9 @@ pub(crate) async fn invite_user_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { debug_error!( From e127c4e5a2e08498c61df30b8424a01f30671bca Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:46:22 +0100 Subject: [PATCH 040/270] feat: Add un/suspend admin commands --- src/admin/user/commands.rs | 34 ++++++++++++++++++++++++++++++++++ src/admin/user/mod.rs | 22 ++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index e5e481e5..297e4bb3 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -224,6 +224,40 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> .await } +#[admin_command] +pub(super) async fn suspend(&self, user_id: String) -> Result { + let user_id = parse_local_user_id(self.services, &user_id)?; + + if user_id == self.services.globals.server_user { + return Err!("Not allowed to suspend the server service account.",); + } + + if !self.services.users.exists(&user_id).await { + return Err!("User {user_id} does not exist."); + } + self.services.users.suspend_account(&user_id).await; + + self.write_str(&format!("User {user_id} has been suspended.")) + .await +} + +#[admin_command] +pub(super) async fn unsuspend(&self, user_id: String) -> Result { + let user_id = parse_local_user_id(self.services, &user_id)?; + + if user_id == self.services.globals.server_user { + return Err!("Not allowed to unsuspend the server service account.",); + } + + if 
!self.services.users.exists(&user_id).await { + return Err!("User {user_id} does not exist."); + } + self.services.users.unsuspend_account(&user_id).await; + + self.write_str(&format!("User {user_id} has been unsuspended.")) + .await +} + #[admin_command] pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index e789376a..645d3637 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -59,6 +59,28 @@ pub(super) enum UserCommand { force: bool, }, + /// - Suspend a user + /// + /// Suspended users are able to log in, sync, and read messages, but are not + /// able to send events nor redact them, cannot change their profile, and + /// are unable to join, invite to, or knock on rooms. + /// + /// Suspended users can still leave rooms and deactivate their account. + /// Suspending them effectively makes them read-only. + Suspend { + /// Username of the user to suspend + user_id: String, + }, + + /// - Unsuspend a user + /// + /// Reverses the effects of the `suspend` command, allowing the user to send + /// messages, change their profile, create room invites, etc. + Unsuspend { + /// Username of the user to unsuspend + user_id: String, + }, + /// - List local users in the database #[clap(alias = "list")] ListUsers, From 5d5350a9fee3b4959ef66b0d18a588d2f2be7dde Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:47:02 +0100 Subject: [PATCH 041/270] feat: Prevent suspended users creating new rooms --- src/api/client/room/create.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index be3fd23b..d1dffc51 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -70,6 +70,10 @@ pub(crate) async fn create_room_route( )); } + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let room_id: OwnedRoomId = match &body.room_id { | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, | _ => RoomId::new(&services.server.name), From 968c0e236c37d204d9718689dd79888cf32f54e5 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:19:10 +0100 Subject: [PATCH 042/270] fix: Create the column appropriately --- src/database/maps.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/database/maps.rs b/src/database/maps.rs index 19f9ced4..91ba2ebe 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -378,6 +378,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "userid_password", ..descriptor::RANDOM }, + Descriptor { + name: "userid_suspended", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userid_presenceid", ..descriptor::RANDOM_SMALL From 8791a9b851f8fc3cd827fe898406561fc4104287 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:19:37 +0100 Subject: [PATCH 043/270] fix: Inappropriate empty check I once again, assumed `true` is actually `false`. 
--- src/service/users/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1a9f6600..6d3b662f 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -177,7 +177,7 @@ impl Service { self.db .userid_suspended .get(user_id) - .map_ok(|val| val.is_empty()) + .map_ok(|val| !val.is_empty()) .map_err(|_| err!(Request(NotFound("User does not exist.")))) .await } From cc864dc8bbb299b7a48c6fb963a61b6979ceb278 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:20:56 +0100 Subject: [PATCH 044/270] feat: Do not allow suspending admin users --- src/admin/user/commands.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 297e4bb3..61f10a86 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -235,6 +235,9 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { if !self.services.users.exists(&user_id).await { return Err!("User {user_id} does not exist."); } + if self.services.users.is_admin(&user_id).await { + return Err!("Admin users cannot be suspended."); + } self.services.users.suspend_account(&user_id).await; self.write_str(&format!("User {user_id} has been suspended.")) From 1ff8af8e9e6ef723287dd9b174dc69844bf8631c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:24:20 +0100 Subject: [PATCH 045/270] style: Remove unneeded statements (clippy) --- src/service/users/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6d3b662f..f6a98858 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -15,7 +15,6 @@ use ruma::{ AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - uint, }; use serde_json::json; @@ -147,12 +146,12 @@ impl Service { } /// Suspend account, placing it in a read-only state - pub async fn 
suspend_account(&self, user_id: &UserId) -> () { + pub async fn suspend_account(&self, user_id: &UserId) { self.db.userid_suspended.insert(user_id, "1"); } /// Unsuspend account, placing it in a read-write state - pub async fn unsuspend_account(&self, user_id: &UserId) -> () { + pub async fn unsuspend_account(&self, user_id: &UserId) { self.db.userid_suspended.remove(user_id); } From d0548ec0644ebb0e3538f65088db9b16ce0eb5a8 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:30:07 +0100 Subject: [PATCH 046/270] feat: Forbid suspended users from sending state events --- src/api/client/state.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 2ddc8f14..07802b1b 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -33,6 +33,10 @@ pub(crate) async fn send_state_event_for_key_route( ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( &services, From 90180916eb13cdcf4e81c668fa6255222c781f1d Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:42:31 +0100 Subject: [PATCH 047/270] feat: Prevent suspended users performing room changes Prevents kicks, bans, unbans, and alias modification --- src/api/client/alias.rs | 6 ++++++ src/api/client/directory.rs | 3 +++ src/api/client/membership.rs | 16 ++++++++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 9f1b05f8..dc7aad44 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -18,6 +18,9 @@ pub(crate) async fn create_alias_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } services .rooms @@ -63,6 +66,9 @@ pub(crate) async fn delete_alias_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } services .rooms diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index aa6ae168..2e219fd9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -128,6 +128,9 @@ pub(crate) async fn set_room_visibility_route( // Return 404 if the room doesn't exist return Err!(Request(NotFound("Room not found"))); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if services .users diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d78ebdec..e6392533 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -578,6 +578,10 @@ pub(crate) async fn kick_user_route( State(services): State, body: Ruma, ) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let Ok(event) = services @@ -613,7 +617,7 @@ pub(crate) async fn kick_user_route( third_party_invite: None, ..event }), - body.sender_user(), + sender_user, &body.room_id, &state_lock, ) @@ -637,6 +641,10 @@ pub(crate) async fn ban_user_route( return Err!(Request(Forbidden("You cannot ban yourself."))); } + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let current_member_content = services @@ -679,6 +687,10 @@ pub(crate) async fn unban_user_route( State(services): State, body: Ruma, ) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let current_member_content = services @@ -707,7 +719,7 @@ pub(crate) async fn unban_user_route( is_direct: None, ..current_member_content }), - body.sender_user(), + sender_user, &body.room_id, &state_lock, ) From 8e06571e7c7e88c14580c7e6206aecdb12a5eb44 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:42:49 +0100 Subject: [PATCH 048/270] feat: Prevent suspended users uploading media --- src/api/client/media.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 94572413..11d5450c 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -52,6 +52,9 @@ pub(crate) async fn create_content_route( body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let filename = body.filename.as_deref(); let content_type = body.content_type.as_deref(); From 08527a28804f673817793af86ea94086fcba3a5c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:43:35 +0100 Subject: [PATCH 049/270] feat: Prevent suspended users upgrading rooms --- src/api/client/room/upgrade.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index da5b49fe..d8f5ea83 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -2,7 +2,7 @@ use std::cmp::max; use axum::extract::State; use conduwuit::{ - Error, Result, err, info, + Err, Error, Result, err, info, matrix::{StateKey, pdu::PduBuilder}, }; use futures::StreamExt; @@ -63,6 +63,10 @@ pub(crate) async fn upgrade_room_route( )); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + // Create a replacement room let replacement_room = RoomId::new(services.globals.server_name()); From 1124097bd17eea3d10b35b9f05539321050b248a Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:52:20 +0100 Subject: [PATCH 050/270] feat: Only allow private read receipts when suspended --- src/api/client/read_marker.rs | 49 +++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index fbfc8fea..e152869c 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -58,29 +58,34 @@ pub(crate) async fn set_read_marker_route( } if let Some(event) = &body.read_receipt { - let receipt_content = BTreeMap::from_iter([( - event.to_owned(), - BTreeMap::from_iter([( - ReceiptType::Read, - BTreeMap::from_iter([(sender_user.to_owned(), ruma::events::receipt::Receipt { - ts: 
Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - })]), - )]), - )]); + if !services.users.is_suspended(sender_user).await? { + let receipt_content = BTreeMap::from_iter([( + event.to_owned(), + BTreeMap::from_iter([( + ReceiptType::Read, + BTreeMap::from_iter([( + sender_user.to_owned(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + )]), + )]), + )]); - services - .rooms - .read_receipt - .readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - ) - .await; + services + .rooms + .read_receipt + .readreceipt_update( + sender_user, + &body.room_id, + &ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + ) + .await; + } } if let Some(event) = &body.private_read_receipt { From 72f8cb30384a32b1489b676fcfc76fc59f323c42 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:53:25 +0100 Subject: [PATCH 051/270] feat: Do not allow suspended users to send typing statuses --- src/api/client/typing.rs | 67 ++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 1d8d02fd..7b0df538 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -26,41 +26,42 @@ pub(crate) async fn create_typing_event_route( { return Err!(Request(Forbidden("You are not in this room."))); } - - match body.state { - | Typing::Yes(duration) => { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + if !services.users.is_suspended(sender_user).await? 
{ + match body.state { + | Typing::Yes(duration) => { + let duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config + .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - }, - | _ => { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; - }, + .rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, + } } // ping presence From eb2e3b3bb70af18ad0821d046fe51926977b5d0a Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 01:52:02 +0100 Subject: [PATCH 052/270] fix: Missing suspensions shouldn't error Turns out copying and pasting the function above verbatim actually introduces more problems than it solves! 
--- src/service/users/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f6a98858..d80a7e22 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ Err, Error, Result, Server, at, debug_warn, err, trace, - utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, + utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; @@ -176,8 +176,7 @@ impl Service { self.db .userid_suspended .get(user_id) - .map_ok(|val| !val.is_empty()) - .map_err(|_| err!(Request(NotFound("User does not exist.")))) + .map_ok_or(Ok(false), |_| Ok(true)) .await } From d8a27eeb54b79a1126dd08c48f8f20d9cc0c0104 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 02:28:04 +0100 Subject: [PATCH 053/270] fix: Failing open on database errors oops --- src/service/users/mod.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index d80a7e22..5db7dc1d 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,11 +1,11 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - Err, Error, Result, Server, at, debug_warn, err, trace, + Err, Error, Result, Server, at, debug_warn, err, result, trace, utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, @@ -176,7 +176,19 @@ impl Service { self.db 
.userid_suspended .get(user_id) - .map_ok_or(Ok(false), |_| Ok(true)) + .map_ok_or_else( + |err| { + if err.is_not_found() { + Ok(false) + } else { + err!(Database(error!( + "Failed to check if user {user_id} is suspended: {err}" + ))); + Ok(true) + } + }, + |_| Ok(true), + ) .await } From 13e17d52e02f33847bed089d4451d248694be854 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 02:30:52 +0100 Subject: [PATCH 054/270] style: Remove unnecessary imports (clippy) --- src/service/users/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 5db7dc1d..b2a42959 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,11 +1,11 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - Err, Error, Result, Server, at, debug_warn, err, result, trace, - utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, + Err, Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, From ecc6fda98b135e9e5fdb9cd93124baac7a4fc001 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 15:07:04 +0100 Subject: [PATCH 055/270] feat: Record metadata about user suspensions --- src/admin/user/commands.rs | 6 +++- src/database/maps.rs | 2 +- src/service/users/mod.rs | 56 ++++++++++++++++++++++++-------------- 3 files changed, 42 insertions(+), 22 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 61f10a86..ad2d1c78 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -238,7 
+238,11 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { if self.services.users.is_admin(&user_id).await { return Err!("Admin users cannot be suspended."); } - self.services.users.suspend_account(&user_id).await; + // TODO: Record the actual user that sent the suspension where possible + self.services + .users + .suspend_account(&user_id, self.services.globals.server_user.as_ref()) + .await; self.write_str(&format!("User {user_id} has been suspended.")) .await diff --git a/src/database/maps.rs b/src/database/maps.rs index 91ba2ebe..214dbf34 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -379,7 +379,7 @@ pub(super) static MAPS: &[Descriptor] = &[ ..descriptor::RANDOM }, Descriptor { - name: "userid_suspended", + name: "userid_suspension", ..descriptor::RANDOM_SMALL }, Descriptor { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2a42959..d2dfccd9 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -16,10 +16,21 @@ use ruma::{ }, serde::Raw, }; +use serde::{Deserialize, Serialize}; use serde_json::json; use crate::{Dep, account_data, admin, globals, rooms}; +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UserSuspension { + /// Whether the user is currently suspended + pub suspended: bool, + /// When the user was suspended (Unix timestamp in milliseconds) + pub suspended_at: u64, + /// User ID of who suspended this user + pub suspended_by: String, +} + pub struct Service { services: Services, db: Data, @@ -52,7 +63,7 @@ struct Data { userid_lastonetimekeyupdate: Arc, userid_masterkeyid: Arc, userid_password: Arc, - userid_suspended: Arc, + userid_suspension: Arc, userid_selfsigningkeyid: Arc, userid_usersigningkeyid: Arc, useridprofilekey_value: Arc, @@ -88,7 +99,7 @@ impl crate::Service for Service { userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), userid_masterkeyid: args.db["userid_masterkeyid"].clone(), userid_password: 
args.db["userid_password"].clone(), - userid_suspended: args.db["userid_suspended"].clone(), + userid_suspension: args.db["userid_suspension"].clone(), userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), useridprofilekey_value: args.db["useridprofilekey_value"].clone(), @@ -146,13 +157,20 @@ impl Service { } /// Suspend account, placing it in a read-only state - pub async fn suspend_account(&self, user_id: &UserId) { - self.db.userid_suspended.insert(user_id, "1"); + pub async fn suspend_account(&self, user_id: &UserId, suspending_user: &UserId) { + self.db.userid_suspension.raw_put( + user_id, + Json(UserSuspension { + suspended: true, + suspended_at: MilliSecondsSinceUnixEpoch::now().get().into(), + suspended_by: suspending_user.to_string(), + }), + ); } /// Unsuspend account, placing it in a read-write state pub async fn unsuspend_account(&self, user_id: &UserId) { - self.db.userid_suspended.remove(user_id); + self.db.userid_suspension.remove(user_id); } /// Check if a user has an account on this homeserver. 
@@ -173,23 +191,21 @@ impl Service { /// Check if account is suspended pub async fn is_suspended(&self, user_id: &UserId) -> Result { - self.db - .userid_suspended + match self + .db + .userid_suspension .get(user_id) - .map_ok_or_else( - |err| { - if err.is_not_found() { - Ok(false) - } else { - err!(Database(error!( - "Failed to check if user {user_id} is suspended: {err}" - ))); - Ok(true) - } - }, - |_| Ok(true), - ) .await + .deserialized::() + { + | Ok(s) => Ok(s.suspended), + | Err(e) => + if e.is_not_found() { + Ok(false) + } else { + Err(e) + }, + } } /// Check if account is active, infallible From acb74faa070801c4ee48cc76f6d3dd19174876b9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 15:17:27 +0100 Subject: [PATCH 056/270] feat: Pass sender through admin commands --- src/admin/context.rs | 15 ++++++++++++++- src/admin/processor.rs | 1 + src/admin/user/commands.rs | 2 +- src/service/admin/mod.rs | 23 ++++++++++++++++++++--- src/service/rooms/timeline/mod.rs | 8 +++++--- 5 files changed, 41 insertions(+), 8 deletions(-) diff --git a/src/admin/context.rs b/src/admin/context.rs index 270537be..b453d88f 100644 --- a/src/admin/context.rs +++ b/src/admin/context.rs @@ -7,13 +7,14 @@ use futures::{ io::{AsyncWriteExt, BufWriter}, lock::Mutex, }; -use ruma::EventId; +use ruma::{EventId, UserId}; pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, pub(crate) reply_id: Option<&'a EventId>, + pub(crate) sender: Option<&'a UserId>, pub(crate) output: Mutex>>, } @@ -36,4 +37,16 @@ impl Context<'_> { output.write_all(s.as_bytes()).map_err(Into::into).await }) } + + /// Get the sender of the admin command, if available + pub(crate) fn sender(&self) -> Option<&UserId> { self.sender } + + /// Check if the command has sender information + pub(crate) fn has_sender(&self) -> bool { self.sender.is_some() } + + /// Get the sender as a string, or service user ID if not available + 
pub(crate) fn sender_or_service_user(&self) -> &UserId { + self.sender + .unwrap_or_else(|| self.services.globals.server_user.as_ref()) + } } diff --git a/src/admin/processor.rs b/src/admin/processor.rs index f7b7140f..8d1fe89c 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -63,6 +63,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce body: &body, timer: SystemTime::now(), reply_id: input.reply_id.as_deref(), + sender: input.sender.as_deref(), output: BufWriter::new(Vec::new()).into(), }; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index ad2d1c78..d094fc5f 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -241,7 +241,7 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { // TODO: Record the actual user that sent the suspension where possible self.services .users - .suspend_account(&user_id, self.services.globals.server_user.as_ref()) + .suspend_account(&user_id, self.sender_or_service_user()) .await; self.write_str(&format!("User {user_id} has been suspended.")) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 683f5400..86e12c3c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -45,11 +45,13 @@ struct Services { services: StdRwLock>>, } -/// Inputs to a command are a multi-line string and optional reply_id. +/// Inputs to a command are a multi-line string, optional reply_id, and optional +/// sender. #[derive(Debug)] pub struct CommandInput { pub command: String, pub reply_id: Option, + pub sender: Option>, } /// Prototype of the tab-completer. 
The input is buffered text when tab @@ -162,7 +164,22 @@ impl Service { pub fn command(&self, command: String, reply_id: Option) -> Result<()> { self.channel .0 - .send(CommandInput { command, reply_id }) + .send(CommandInput { command, reply_id, sender: None }) + .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) + } + + /// Posts a command to the command processor queue with sender information + /// and returns. Processing will take place on the service worker's task + /// asynchronously. Errors if the queue is full. + pub fn command_with_sender( + &self, + command: String, + reply_id: Option, + sender: Box, + ) -> Result<()> { + self.channel + .0 + .send(CommandInput { command, reply_id, sender: Some(sender) }) .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) } @@ -173,7 +190,7 @@ impl Service { command: String, reply_id: Option, ) -> ProcessorResult { - self.process_command(CommandInput { command, reply_id }) + self.process_command(CommandInput { command, reply_id, sender: None }) .await } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 37963246..534d8faf 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -536,9 +536,11 @@ impl Service { self.services.search.index_pdu(shortroomid, &pdu_id, &body); if self.services.admin.is_admin_command(pdu, &body).await { - self.services - .admin - .command(body, Some((*pdu.event_id).into()))?; + self.services.admin.command_with_sender( + body, + Some((*pdu.event_id).into()), + pdu.sender.clone().into(), + )?; } } }, From d4862b8ead02a6ef61580ddefb24febb56c108b4 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 16:26:04 +0100 Subject: [PATCH 057/270] style: Remove redundant, unused functions --- src/admin/context.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/admin/context.rs b/src/admin/context.rs index b453d88f..3d3cffb7 100644 --- a/src/admin/context.rs +++ b/src/admin/context.rs @@ -38,12 +38,6 
@@ impl Context<'_> { }) } - /// Get the sender of the admin command, if available - pub(crate) fn sender(&self) -> Option<&UserId> { self.sender } - - /// Check if the command has sender information - pub(crate) fn has_sender(&self) -> bool { self.sender.is_some() } - /// Get the sender as a string, or service user ID if not available pub(crate) fn sender_or_service_user(&self) -> &UserId { self.sender From ec9d3d613e3b3be018ac73c791c956ae44cc6611 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 23:02:15 +0100 Subject: [PATCH 058/270] chore: Add funding --- .github/FUNDING.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..321dc74c --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +github: [JadedBlueEyes] +# Doesn't support an array, so we can only list nex +ko_fi: nexy7574 From 17930708d86dd73c5d39dbe758d489621b2626e5 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 23:06:26 +0100 Subject: [PATCH 059/270] chore: Add second ko-fi as custom link --- .github/FUNDING.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 321dc74c..fcfaade5 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,5 @@ github: [JadedBlueEyes] # Doesn't support an array, so we can only list nex ko_fi: nexy7574 +custom: + - https://ko-fi.com/JadedBlueEyes From 97e5cc4e2db5fee0495d8f597180fe774b11aa0d Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 01:55:13 +0100 Subject: [PATCH 060/270] feat: Implement user reporting --- Cargo.lock | 22 ++++++------ Cargo.toml | 2 +- src/api/client/report.rs | 72 ++++++++++++++++++++++++++++++++-------- src/api/router.rs | 1 + 4 files changed, 71 insertions(+), 26 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 92044b92..8719c6ca 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ 
[[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ 
dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 5c289adf..83afd482 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "9b65f83981f6f53d185ce77da37aaef9dfd764a9" +rev = "f899fff6738dd57d191474b0f12a4509cf8f0981" features = [ "compat", "rand", diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 4ee8ebe5..f63e78ed 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -9,6 +9,7 @@ use ruma::{ EventId, RoomId, UserId, api::client::{ error::ErrorKind, + report_user, room::{report_content, report_room}, }, events::room::message, @@ -30,12 +31,6 @@ pub(crate) async fn report_room_route( // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - info!( - "Received room report by user {sender_user} for room {} with reason: \"{}\"", - body.room_id, - body.reason.as_deref().unwrap_or("") - ); - if body.reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( 
ErrorKind::InvalidParam, @@ -55,6 +50,11 @@ pub(crate) async fn report_room_route( "Room does not exist to us, no local users have joined at all" ))); } + info!( + "Received room report by user {sender_user} for room {} with reason: \"{}\"", + body.room_id, + body.reason.as_deref().unwrap_or("") + ); // send admin room message that we received the report with an @room ping for // urgency @@ -84,14 +84,6 @@ pub(crate) async fn report_event_route( // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - info!( - "Received event report by user {sender_user} for room {} and event ID {}, with reason: \ - \"{}\"", - body.room_id, - body.event_id, - body.reason.as_deref().unwrap_or("") - ); - delay_response().await; // check if we know about the reported event ID or if it's invalid @@ -109,6 +101,13 @@ pub(crate) async fn report_event_route( &pdu, ) .await?; + info!( + "Received event report by user {sender_user} for room {} and event ID {}, with reason: \ + \"{}\"", + body.room_id, + body.event_id, + body.reason.as_deref().unwrap_or("") + ); // send admin room message that we received the report with an @room ping for // urgency @@ -130,6 +129,51 @@ pub(crate) async fn report_event_route( Ok(report_content::v3::Response {}) } +#[tracing::instrument(skip_all, fields(%client), name = "report_user")] +pub(crate) async fn report_user_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + // user authentication + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if body.reason.as_ref().is_some_and(|s| s.len() > 750) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 750 characters or fewer", + )); + } + + delay_response().await; + + if !services.users.is_active_local(&body.user_id) { + // return 200 as to not reveal if the user exists. Recommended by spec. 
+ return Ok(report_user::v3::Response {}); + } + + info!( + "Received room report from {sender_user} for user {} with reason: \"{}\"", + body.user_id, + body.reason.as_deref().unwrap_or("") + ); + + // send admin room message that we received the report with an @room ping for + // urgency + services + .admin + .send_message(message::RoomMessageEventContent::text_markdown(format!( + "@room User report received from {} -\n\nUser ID: {}\n\nReport Reason: {}", + sender_user.to_owned(), + body.user_id, + body.reason.as_deref().unwrap_or("") + ))) + .await + .ok(); + + Ok(report_user::v3::Response {}) +} + /// in the following order: /// /// check if the room ID from the URI matches the PDU's room ID diff --git a/src/api/router.rs b/src/api/router.rs index 5416e9e9..d1b05a91 100644 --- a/src/api/router.rs +++ b/src/api/router.rs @@ -94,6 +94,7 @@ pub fn build(router: Router, server: &Server) -> Router { .ruma_route(&client::redact_event_route) .ruma_route(&client::report_event_route) .ruma_route(&client::report_room_route) + .ruma_route(&client::report_user_route) .ruma_route(&client::create_alias_route) .ruma_route(&client::delete_alias_route) .ruma_route(&client::get_alias_route) From 59912709aa603d66bc24c074f0337c71fdeaa56c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 15:42:38 +0100 Subject: [PATCH 061/270] feat: Send intentional mentions in report messages --- src/api/client/report.rs | 125 +++++++++++++++++++++++++-------------- 1 file changed, 81 insertions(+), 44 deletions(-) diff --git a/src/api/client/report.rs b/src/api/client/report.rs index f63e78ed..1f237fcb 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -1,4 +1,7 @@ -use std::time::Duration; +use std::{ + ops::{Mul, Sub}, + time::Duration, +}; use axum::extract::State; use axum_client_ip::InsecureClientIp; @@ -6,19 +9,35 @@ use conduwuit::{Err, Error, Result, debug_info, info, matrix::pdu::PduEvent, uti use conduwuit_service::Services; use rand::Rng; use ruma::{ - 
EventId, RoomId, UserId, + EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ error::ErrorKind, report_user, room::{report_content, report_room}, }, - events::room::message, + events::{ + Mentions, + room::{ + message, + message::{RoomMessageEvent, RoomMessageEventContent}, + }, + }, int, }; use tokio::time::sleep; use crate::Ruma; +struct Report { + sender: OwnedUserId, + room_id: Option, + event_id: Option, + user_id: Option, + report_type: String, + reason: Option, + score: Option, +} + /// # `POST /_matrix/client/v3/rooms/{roomId}/report` /// /// Reports an abusive room to homeserver admins @@ -56,18 +75,17 @@ pub(crate) async fn report_room_route( body.reason.as_deref().unwrap_or("") ); - // send admin room message that we received the report with an @room ping for - // urgency - services - .admin - .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room Room report received from {} -\n\nRoom ID: {}\n\nReport Reason: {}", - sender_user.to_owned(), - body.room_id, - body.reason.as_deref().unwrap_or("") - ))) - .await - .ok(); + let report = Report { + sender: sender_user.to_owned(), + room_id: Some(body.room_id.to_owned()), + event_id: None, + user_id: None, + report_type: "room".to_string(), + reason: body.reason.clone(), + score: None, + }; + + services.admin.send_message(build_report(report)).await.ok(); Ok(report_room::v3::Response {}) } @@ -108,23 +126,16 @@ pub(crate) async fn report_event_route( body.event_id, body.reason.as_deref().unwrap_or("") ); - - // send admin room message that we received the report with an @room ping for - // urgency - services - .admin - .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room Event report received from {} -\n\nEvent ID: {}\nRoom ID: {}\nSent By: \ - {}\n\nReport Score: {}\nReport Reason: {}", - sender_user.to_owned(), - pdu.event_id, - pdu.room_id, - pdu.sender, - body.score.unwrap_or_else(|| ruma::Int::from(0)), - 
body.reason.as_deref().unwrap_or("") - ))) - .await - .ok(); + let report = Report { + sender: sender_user.to_owned(), + room_id: Some(body.room_id.to_owned()), + event_id: Some(body.event_id.to_owned()), + user_id: None, + report_type: "event".to_string(), + reason: body.reason.clone(), + score: body.score, + }; + services.admin.send_message(build_report(report)).await.ok(); Ok(report_content::v3::Response {}) } @@ -152,24 +163,23 @@ pub(crate) async fn report_user_route( return Ok(report_user::v3::Response {}); } + let report = Report { + sender: sender_user.to_owned(), + room_id: None, + event_id: None, + user_id: Some(body.user_id.to_owned()), + report_type: "user".to_string(), + reason: body.reason.clone(), + score: None, + }; + info!( "Received room report from {sender_user} for user {} with reason: \"{}\"", body.user_id, body.reason.as_deref().unwrap_or("") ); - // send admin room message that we received the report with an @room ping for - // urgency - services - .admin - .send_message(message::RoomMessageEventContent::text_markdown(format!( - "@room User report received from {} -\n\nUser ID: {}\n\nReport Reason: {}", - sender_user.to_owned(), - body.user_id, - body.reason.as_deref().unwrap_or("") - ))) - .await - .ok(); + services.admin.send_message(build_report(report)).await.ok(); Ok(report_user::v3::Response {}) } @@ -231,6 +241,33 @@ async fn is_event_report_valid( Ok(()) } +/// Builds a report message to be sent to the admin room. 
+fn build_report(report: Report) -> RoomMessageEventContent { + let mut text = + format!("@room New {} report received from {}:\n\n", report.report_type, report.sender); + if report.user_id.is_some() { + text.push_str(&format!("- Reported User ID: `{}`\n", report.user_id.unwrap())); + } + if report.room_id.is_some() { + text.push_str(&format!("- Reported Room ID: `{}`\n", report.room_id.unwrap())); + } + if report.event_id.is_some() { + text.push_str(&format!("- Reported Event ID: `{}`\n", report.event_id.unwrap())); + } + if let Some(score) = report.score { + if score < int!(0) { + score.mul(int!(-1)); // invert the score to make it N/100 + // unsure why the spec says -100 to 0, but 0 to 100 is more human. + } + text.push_str(&format!("- User-supplied offensiveness score: {}%\n", -score)); + } + if let Some(reason) = report.reason { + text.push_str(&format!("- Report Reason: {}\n", reason)); + } + + RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention()); +} + /// even though this is kinda security by obscurity, let's still make a small /// random delay sending a response per spec suggestion regarding /// enumerating for potential events existing in our server. From f49c73c0317e65b290bb7d50e81ae6a326e2bed6 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 15:44:04 +0100 Subject: [PATCH 062/270] feat: Forbid suspended users from sending reports --- src/api/client/report.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 1f237fcb..d0d21829 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -49,6 +49,9 @@ pub(crate) async fn report_room_route( ) -> Result { // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if body.reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( @@ -101,6 +104,9 @@ pub(crate) async fn report_event_route( ) -> Result { // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } delay_response().await; @@ -148,6 +154,9 @@ pub(crate) async fn report_user_route( ) -> Result { // user authentication let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if body.reason.as_ref().is_some_and(|s| s.len() > 750) { return Err(Error::BadRequest( From 24d2a514e22930e15d27825063cb0a8e9244ddc9 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 18:00:28 +0100 Subject: [PATCH 063/270] chore: Resolve linting errors --- Cargo.lock | 22 ++++++++--------- Cargo.toml | 2 +- src/api/client/report.rs | 45 +++++++++++++---------------------- src/api/client/unversioned.rs | 1 + 4 files changed, 29 insertions(+), 41 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8719c6ca..82e7a20d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies 
= [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=f899fff6738dd57d191474b0f12a4509cf8f0981#f899fff6738dd57d191474b0f12a4509cf8f0981" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=a4b948b40417a65ab0282ae47cc50035dd455e02#a4b948b40417a65ab0282ae47cc50035dd455e02" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index 83afd482..b815e2b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "f899fff6738dd57d191474b0f12a4509cf8f0981" +rev = "a4b948b40417a65ab0282ae47cc50035dd455e02" features = [ "compat", "rand", diff --git a/src/api/client/report.rs b/src/api/client/report.rs index d0d21829..5113b42f 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -1,7 +1,4 @@ -use std::{ - ops::{Mul, Sub}, - time::Duration, -}; +use std::{fmt::Write as _, ops::Mul, time::Duration}; use axum::extract::State; use axum_client_ip::InsecureClientIp; @@ -15,13 +12,7 @@ use ruma::{ report_user, room::{report_content, report_room}, }, - events::{ - Mentions, - room::{ - message, - message::{RoomMessageEvent, RoomMessageEventContent}, - }, - }, + events::{Mentions, room::message::RoomMessageEventContent}, int, }; use tokio::time::sleep; @@ -80,10 +71,10 @@ pub(crate) async fn report_room_route( let report = Report { sender: sender_user.to_owned(), - room_id: Some(body.room_id.to_owned()), + room_id: Some(body.room_id.clone()), event_id: None, user_id: None, - report_type: "room".to_string(), + report_type: "room".to_owned(), reason: body.reason.clone(), score: None, }; @@ -134,10 +125,10 @@ pub(crate) async fn report_event_route( ); let report = Report { sender: sender_user.to_owned(), - room_id: Some(body.room_id.to_owned()), - event_id: Some(body.event_id.to_owned()), + room_id: 
Some(body.room_id.clone()), + event_id: Some(body.event_id.clone()), user_id: None, - report_type: "event".to_string(), + report_type: "event".to_owned(), reason: body.reason.clone(), score: body.score, }; @@ -167,7 +158,7 @@ pub(crate) async fn report_user_route( delay_response().await; - if !services.users.is_active_local(&body.user_id) { + if !services.users.is_active_local(&body.user_id).await { // return 200 as to not reveal if the user exists. Recommended by spec. return Ok(report_user::v3::Response {}); } @@ -176,8 +167,8 @@ pub(crate) async fn report_user_route( sender: sender_user.to_owned(), room_id: None, event_id: None, - user_id: Some(body.user_id.to_owned()), - report_type: "user".to_string(), + user_id: Some(body.user_id.clone()), + report_type: "user".to_owned(), reason: body.reason.clone(), score: None, }; @@ -255,26 +246,22 @@ fn build_report(report: Report) -> RoomMessageEventContent { let mut text = format!("@room New {} report received from {}:\n\n", report.report_type, report.sender); if report.user_id.is_some() { - text.push_str(&format!("- Reported User ID: `{}`\n", report.user_id.unwrap())); + let _ = writeln!(text, "- Reported User ID: `{}`", report.user_id.unwrap()); } if report.room_id.is_some() { - text.push_str(&format!("- Reported Room ID: `{}`\n", report.room_id.unwrap())); + let _ = writeln!(text, "- Reported Room ID: `{}`", report.room_id.unwrap()); } if report.event_id.is_some() { - text.push_str(&format!("- Reported Event ID: `{}`\n", report.event_id.unwrap())); + let _ = writeln!(text, "- Reported Event ID: `{}`", report.event_id.unwrap()); } if let Some(score) = report.score { - if score < int!(0) { - score.mul(int!(-1)); // invert the score to make it N/100 - // unsure why the spec says -100 to 0, but 0 to 100 is more human. 
- } - text.push_str(&format!("- User-supplied offensiveness score: {}%\n", -score)); + let _ = writeln!(text, "- User-supplied offensiveness score: {}%", score.mul(int!(-1))); } if let Some(reason) = report.reason { - text.push_str(&format!("- Report Reason: {}\n", reason)); + let _ = writeln!(text, "- Report Reason: {reason}"); } - RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention()); + RoomMessageEventContent::text_markdown(text).add_mentions(Mentions::with_room_mention()) } /// even though this is kinda security by obscurity, let's still make a small diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index 232d5b28..ad377ca4 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -38,6 +38,7 @@ pub(crate) async fn get_supported_versions_route( "v1.4".to_owned(), "v1.5".to_owned(), "v1.11".to_owned(), + "v1.14".to_owned(), ], unstable_features: BTreeMap::from_iter([ ("org.matrix.e2e_cross_signing".to_owned(), true), From 4f69da47c6899e5d6e3280c34524e6aa53f838f4 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 18:38:48 +0100 Subject: [PATCH 064/270] feat: Advertise support for spec v1.8, 1.12, 1.13, and 1.14 --- committed.toml | 1 + src/api/client/unversioned.rs | 3 +++ 2 files changed, 4 insertions(+) diff --git a/committed.toml b/committed.toml index 64f7f18a..59750fa5 100644 --- a/committed.toml +++ b/committed.toml @@ -1,2 +1,3 @@ style = "conventional" +subject_length = 72 allowed_types = ["ci", "build", "fix", "feat", "chore", "docs", "style", "refactor", "perf", "test"] diff --git a/src/api/client/unversioned.rs b/src/api/client/unversioned.rs index ad377ca4..a4136d1a 100644 --- a/src/api/client/unversioned.rs +++ b/src/api/client/unversioned.rs @@ -37,7 +37,10 @@ pub(crate) async fn get_supported_versions_route( "v1.3".to_owned(), "v1.4".to_owned(), "v1.5".to_owned(), + "v1.8".to_owned(), "v1.11".to_owned(), + "v1.12".to_owned(), + "v1.13".to_owned(), 
"v1.14".to_owned(), ], unstable_features: BTreeMap::from_iter([ From b44791799c8141d93cf50634eb799505cae8f15a Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 23:14:41 +0100 Subject: [PATCH 065/270] fix: Room bans preventing federated leaves Fixes the issue where room bans prevent federating leave events, resulting in local users being stuck in remote rooms --- src/admin/room/moderation.rs | 62 +++++++++++------------------------- 1 file changed, 18 insertions(+), 44 deletions(-) diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ee429fc6..921249bd 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -1,7 +1,7 @@ use api::client::leave_room; use clap::Subcommand; use conduwuit::{ - Err, Result, debug, + Err, Result, debug, info, utils::{IterStream, ReadyExt}, warn, }; @@ -70,7 +70,6 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { }; debug!("Room specified is a room ID, banning room ID"); - self.services.rooms.metadata.ban_room(room_id, true); room_id.to_owned() } else if room.is_room_alias_id() { @@ -90,47 +89,25 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { locally, if not using get_alias_helper to fetch room ID remotely" ); - let room_id = match self + match self .services .rooms .alias - .resolve_local_alias(room_alias) + .resolve_alias(room_alias, None) .await { - | Ok(room_id) => room_id, - | _ => { + | Ok((room_id, servers)) => { debug!( - "We don't have this room alias to a room ID locally, attempting to fetch \ - room ID over federation" + ?room_id, + ?servers, + "Got federation response fetching room ID for room {room}" ); - - match self - .services - .rooms - .alias - .resolve_alias(room_alias, None) - .await - { - | Ok((room_id, servers)) => { - debug!( - ?room_id, - ?servers, - "Got federation response fetching room ID for {room_id}" - ); - room_id - }, - | Err(e) => { - return Err!( - "Failed to resolve room alias {room_alias} to a room ID: {e}" - 
); - }, - } + room_id }, - }; - - self.services.rooms.metadata.ban_room(&room_id, true); - - room_id + | Err(e) => { + return Err!("Failed to resolve room alias {room} to a room ID: {e}"); + }, + } } else { return Err!( "Room specified is not a room ID or room alias. Please note that this requires a \ @@ -139,7 +116,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { ); }; - debug!("Making all users leave the room {room_id} and forgetting it"); + info!("Making all users leave the room {room_id} and forgetting it"); let mut users = self .services .rooms @@ -150,7 +127,7 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { .boxed(); while let Some(ref user_id) = users.next().await { - debug!( + info!( "Attempting leave for user {user_id} in room {room_id} (ignoring all errors, \ evicting admins too)", ); @@ -177,10 +154,9 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { }) .await; - // unpublish from room directory - self.services.rooms.directory.set_not_public(&room_id); - - self.services.rooms.metadata.disable_room(&room_id, true); + self.services.rooms.directory.set_not_public(&room_id); // remove from the room directory + self.services.rooms.metadata.ban_room(&room_id, true); // prevent further joins + self.services.rooms.metadata.disable_room(&room_id, true); // disable federation self.write_str( "Room banned, removed all our local users, and disabled incoming federation with room.", @@ -302,8 +278,6 @@ async fn ban_list_of_rooms(&self) -> Result { } for room_id in room_ids { - self.services.rooms.metadata.ban_room(&room_id, true); - debug!("Banned {room_id} successfully"); room_ban_count = room_ban_count.saturating_add(1); @@ -346,9 +320,9 @@ async fn ban_list_of_rooms(&self) -> Result { }) .await; + self.services.rooms.metadata.ban_room(&room_id, true); // unpublish from room directory, ignore errors self.services.rooms.directory.set_not_public(&room_id); - self.services.rooms.metadata.disable_room(&room_id, true); 
} From 68afb07c27368c0ec4d9fc936ef0cf39eb2233be Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 1 Jul 2025 01:36:58 +0100 Subject: [PATCH 066/270] feat: Stabilise room summary API (MSC3266) # Conflicts: # Cargo.lock # Cargo.toml --- src/api/client/room/summary.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index 67d2e2ad..ab534765 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -43,10 +43,9 @@ pub(crate) async fn get_room_summary_legacy( } /// # `GET /_matrix/client/unstable/im.nheko.summary/summary/{roomIdOrAlias}` +/// # `GET /_matrix/client/v1/room_summary/{roomIdOrAlias}` /// /// Returns a short description of the state of a room. -/// -/// An implementation of [MSC3266](https://github.com/matrix-org/matrix-spec-proposals/pull/3266) #[tracing::instrument(skip_all, fields(%client), name = "room_summary")] pub(crate) async fn get_room_summary( State(services): State, From 6e609185847023cfb3caf85b98f03a6e3efb6e2e Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 2 Jul 2025 00:41:34 +0100 Subject: [PATCH 067/270] feat: Suspend new users on registration --- conduwuit-example.toml | 11 +++++++++++ src/api/client/account.rs | 20 ++++++++++++++++++++ src/core/config/mod.rs | 11 +++++++++++ src/service/admin/mod.rs | 16 +++++++++++++++- 4 files changed, 57 insertions(+), 1 deletion(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 1a8be2aa..794ab870 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -398,6 +398,17 @@ # #allow_registration = false +# If registration is enabled, and this setting is true, new users +# registered after the first admin user will be automatically suspended +# and will require an admin to run `!admin users unsuspend `. 
+# +# Suspended users are still able to read messages, make profile updates, +# leave rooms, and deactivate their account, however cannot send messages, +# invites, or create/join or otherwise modify rooms. +# They are effectively read-only. +# +#suspend_on_register = false + # Enabling this setting opens registration to anyone without restrictions. # This makes your server vulnerable to abuse # diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32f2530c..32ac7bc2 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -2,6 +2,7 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; +use axum_extra::headers::UserAgent; use conduwuit::{ Err, Error, Result, debug_info, err, error, info, is_equal_to, matrix::pdu::PduBuilder, @@ -490,6 +491,25 @@ pub(crate) async fn register_route( { services.admin.make_user_admin(&user_id).await?; warn!("Granting {user_id} admin privileges as the first user"); + } else if services.config.suspend_on_register { + // This is not an admin, suspend them. + // Note that we can still do auto joins for suspended users + services + .users + .suspend_account(&user_id, &services.globals.server_user) + .await; + // And send an @room notice to the admin room, to prompt admins to review the + // new user and ideally unsuspend them if deemed appropriate. + if services.server.config.admin_room_notices { + services + .admin + .send_loud_message(RoomMessageEventContent::text_plain(format!( + "User {user_id} has been suspended as they are not the first user \ + on this server. Please review and unsuspend them if appropriate." 
+ ))) + .await + .ok(); + } } } } diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d4a10345..c735193b 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -513,6 +513,17 @@ pub struct Config { #[serde(default)] pub allow_registration: bool, + /// If registration is enabled, and this setting is true, new users + /// registered after the first admin user will be automatically suspended + /// and will require an admin to run `!admin users unsuspend `. + /// + /// Suspended users are still able to read messages, make profile updates, + /// leave rooms, and deactivate their account, however cannot send messages, + /// invites, or create/join or otherwise modify rooms. + /// They are effectively read-only. + #[serde(default)] + pub suspend_on_register: bool, + /// Enabling this setting opens registration to anyone without restrictions. /// This makes your server vulnerable to abuse #[serde(default)] diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 86e12c3c..11d93cc2 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -18,7 +18,10 @@ use futures::{FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, UserId, - events::room::message::{Relation, RoomMessageEventContent}, + events::{ + Mentions, + room::message::{Relation, RoomMessageEventContent}, + }, }; use tokio::sync::RwLock; @@ -158,6 +161,17 @@ impl Service { .await } + /// Sends a message, the same as send_message() but with an @room ping to + /// notify all users in the room. + pub async fn send_loud_message( + &self, + mut message_content: RoomMessageEventContent, + ) -> Result<()> { + // Add @room ping + message_content = message_content.add_mentions(Mentions::with_room_mention()); + self.send_message(message_content).await + } + /// Posts a command to the command processor queue and returns. Processing /// will take place on the service worker's task asynchronously. 
Errors if /// the queue is full. From 8e0852e5b5697bb7d39639719c7d7e3f969e9c42 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 2 Jul 2025 00:44:47 +0100 Subject: [PATCH 068/270] docs: Add suggestion about auto join room Adds suggestion to suspend_on_register doc that admins should add a room that contains information to their auto_join_rooms as to not confuse new users who may be lost at the fact they can't join any rooms or send any messages. --- src/core/config/mod.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index c735193b..e3db4900 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -521,6 +521,11 @@ pub struct Config { /// leave rooms, and deactivate their account, however cannot send messages, /// invites, or create/join or otherwise modify rooms. /// They are effectively read-only. + /// + /// If you want to use this to screen people who register on your server, + /// you should add a room to `auto_join_rooms` that is public, and contains + /// information that new users can read (since they won't be able to DM + /// anyone, or send a message, and may be confused). 
#[serde(default)] pub suspend_on_register: bool, From d6aa03ea7308fae430dd99df1d1ce4f5360a1d12 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 2 Jul 2025 01:53:21 +0100 Subject: [PATCH 069/270] style: Remove extraneous import --- src/api/client/account.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32ac7bc2..56b1a81c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -2,7 +2,6 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use axum_extra::headers::UserAgent; use conduwuit::{ Err, Error, Result, debug_info, err, error, info, is_equal_to, matrix::pdu::PduBuilder, From 49f7a2487f70ffcfeb0b355e8625a8a694e7ca6c Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 23:06:43 +0000 Subject: [PATCH 070/270] Modernize various sender_user/sender_device lets. Signed-off-by: Jason Volk --- src/api/client/account.rs | 27 +++++++++++---------------- src/api/client/alias.rs | 4 ++-- src/api/client/device.rs | 8 ++------ src/api/client/filter.rs | 10 ++++------ src/api/client/keys.rs | 11 +++++------ src/api/client/media.rs | 10 +++++----- src/api/client/media_legacy.rs | 2 +- src/api/client/membership.rs | 6 +++--- src/api/client/openid.rs | 4 ++-- src/api/client/profile.rs | 4 ++-- src/api/client/push.rs | 21 ++++++++++----------- src/api/client/redact.rs | 4 ++-- src/api/client/report.rs | 5 ++--- src/api/client/room/aliases.rs | 2 +- src/api/client/room/create.rs | 6 +++--- src/api/client/session.rs | 15 ++++----------- src/api/client/state.rs | 2 +- src/api/client/tag.rs | 6 +++--- src/api/client/to_device.rs | 4 ++-- src/api/client/unstable.rs | 8 ++++---- 20 files changed, 69 insertions(+), 90 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 56b1a81c..30f8b89c 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -603,7 +603,6 @@ pub(crate) async fn change_password_route( 
.sender_user .as_ref() .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; - let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -617,7 +616,7 @@ pub(crate) async fn change_password_route( | Some(auth) => { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) + .try_auth(sender_user, body.sender_device(), auth, &uiaainfo) .await?; if !worked { @@ -631,7 +630,7 @@ pub(crate) async fn change_password_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + .create(sender_user, body.sender_device(), &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, @@ -650,7 +649,7 @@ pub(crate) async fn change_password_route( services .users .all_device_ids(sender_user) - .ready_filter(|id| *id != sender_device) + .ready_filter(|id| *id != body.sender_device()) .for_each(|id| services.users.remove_device(sender_user, id)) .await; @@ -659,17 +658,17 @@ pub(crate) async fn change_password_route( .pusher .get_pushkeys(sender_user) .map(ToOwned::to_owned) - .broad_filter_map(|pushkey| async move { + .broad_filter_map(async |pushkey| { services .pusher .get_pusher_device(&pushkey) .await .ok() - .filter(|pusher_device| pusher_device != sender_device) + .filter(|pusher_device| pusher_device != body.sender_device()) .is_some() .then_some(pushkey) }) - .for_each(|pushkey| async move { + .for_each(async |pushkey| { services.pusher.delete_pusher(sender_user, &pushkey).await; }) .await; @@ -699,13 +698,10 @@ pub(crate) async fn whoami_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device_id = body.sender_device.clone(); - Ok(whoami::v3::Response { - user_id: sender_user.clone(), - device_id, - is_guest: services.users.is_deactivated(sender_user).await? 
+ user_id: body.sender_user().to_owned(), + device_id: body.sender_device.clone(), + is_guest: services.users.is_deactivated(body.sender_user()).await? && body.appservice_info.is_none(), }) } @@ -733,7 +729,6 @@ pub(crate) async fn deactivate_route( .sender_user .as_ref() .ok_or_else(|| err!(Request(MissingToken("Missing access token."))))?; - let sender_device = body.sender_device(); let mut uiaainfo = UiaaInfo { flows: vec![AuthFlow { stages: vec![AuthType::Password] }], @@ -747,7 +742,7 @@ pub(crate) async fn deactivate_route( | Some(auth) => { let (worked, uiaainfo) = services .uiaa - .try_auth(sender_user, sender_device, auth, &uiaainfo) + .try_auth(sender_user, body.sender_device(), auth, &uiaainfo) .await?; if !worked { @@ -760,7 +755,7 @@ pub(crate) async fn deactivate_route( uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, json); + .create(sender_user, body.sender_device(), &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index dc7aad44..97c1a1bd 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -17,7 +17,7 @@ pub(crate) async fn create_alias_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? { return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } @@ -65,7 +65,7 @@ pub(crate) async fn delete_alias_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? 
{ return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } diff --git a/src/api/client/device.rs b/src/api/client/device.rs index 5519a1a5..b0a7e142 100644 --- a/src/api/client/device.rs +++ b/src/api/client/device.rs @@ -21,11 +21,9 @@ pub(crate) async fn get_devices_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let devices: Vec = services .users - .all_devices_metadata(sender_user) + .all_devices_metadata(body.sender_user()) .collect() .await; @@ -39,11 +37,9 @@ pub(crate) async fn get_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device = services .users - .get_device_metadata(sender_user, &body.body.device_id) + .get_device_metadata(body.sender_user(), &body.body.device_id) .await .map_err(|_| err!(Request(NotFound("Device not found."))))?; diff --git a/src/api/client/filter.rs b/src/api/client/filter.rs index 97044ffc..9814d366 100644 --- a/src/api/client/filter.rs +++ b/src/api/client/filter.rs @@ -13,11 +13,9 @@ pub(crate) async fn get_filter_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .users - .get_filter(sender_user, &body.filter_id) + .get_filter(body.sender_user(), &body.filter_id) .await .map(get_filter::v3::Response::new) .map_err(|_| err!(Request(NotFound("Filter not found.")))) @@ -30,9 +28,9 @@ pub(crate) async fn create_filter_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let filter_id = services.users.create_filter(sender_user, &body.filter); + let filter_id = services + .users + .create_filter(body.sender_user(), &body.filter); Ok(create_filter::v3::Response::new(filter_id)) } diff --git a/src/api/client/keys.rs 
b/src/api/client/keys.rs index 650c573f..d2bd46a0 100644 --- a/src/api/client/keys.rs +++ b/src/api/client/keys.rs @@ -126,7 +126,7 @@ pub(crate) async fn get_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); get_keys_helper( &services, @@ -157,8 +157,7 @@ pub(crate) async fn upload_signing_keys_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let (sender_user, sender_device) = body.sender(); // UIAA let mut uiaainfo = UiaaInfo { @@ -203,12 +202,12 @@ pub(crate) async fn upload_signing_keys_route( } // Success! }, - | _ => match body.json_body { + | _ => match body.json_body.as_ref() { | Some(json) => { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); services .uiaa - .create(sender_user, sender_device, &uiaainfo, &json); + .create(sender_user, sender_device, &uiaainfo, json); return Err(Error::Uiaa(uiaainfo)); }, @@ -373,7 +372,7 @@ pub(crate) async fn get_key_changes_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut device_list_updates = HashSet::new(); diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 11d5450c..3f491d54 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -51,7 +51,7 @@ pub(crate) async fn create_content_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); + let user = body.sender_user(); if services.users.is_suspended(user).await? 
{ return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } @@ -97,7 +97,7 @@ pub(crate) async fn get_content_thumbnail_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); + let user = body.sender_user(); let dim = Dim::from_ruma(body.width, body.height, body.method.clone())?; let mxc = Mxc { @@ -134,7 +134,7 @@ pub(crate) async fn get_content_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); + let user = body.sender_user(); let mxc = Mxc { server_name: &body.server_name, @@ -170,7 +170,7 @@ pub(crate) async fn get_content_as_filename_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let user = body.sender_user.as_ref().expect("user is authenticated"); + let user = body.sender_user(); let mxc = Mxc { server_name: &body.server_name, @@ -206,7 +206,7 @@ pub(crate) async fn get_media_preview_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let url = &body.url; let url = Url::parse(&body.url).map_err(|e| { diff --git a/src/api/client/media_legacy.rs b/src/api/client/media_legacy.rs index d9f24f77..930daab4 100644 --- a/src/api/client/media_legacy.rs +++ b/src/api/client/media_legacy.rs @@ -55,7 +55,7 @@ pub(crate) async fn get_media_preview_legacy_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let url = &body.url; let url = Url::parse(&body.url).map_err(|e| { diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index e6392533..b5356b94 100644 --- a/src/api/client/membership.rs +++ 
b/src/api/client/membership.rs @@ -249,14 +249,14 @@ pub(crate) async fn join_room_by_id_or_alias_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let sender_user = body.sender_user(); let appservice_info = &body.appservice_info; - let body = body.body; + let body = &body.body; if services.users.is_suspended(sender_user).await? { return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { banned_room_check( &services, diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index 8d2de68d..e27b3ab8 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -19,9 +19,9 @@ pub(crate) async fn create_openid_token_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); - if sender_user != &body.user_id { + if sender_user != body.user_id { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Not allowed to request OpenID tokens on behalf of other users", diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index bdba4078..76b5dc6d 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -35,7 +35,7 @@ pub(crate) async fn set_displayname_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? 
{ return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } @@ -127,7 +127,7 @@ pub(crate) async fn set_avatar_url_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? { return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 81020ffa..125b26bb 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -106,7 +106,7 @@ pub(crate) async fn get_pushrules_global_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let Some(content_value) = services .account_data @@ -234,9 +234,8 @@ pub(crate) async fn set_pushrule_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; - + let sender_user = body.sender_user(); + let body = &body.body; let mut account_data: PushRulesEvent = services .account_data .get_global(sender_user, GlobalAccountDataEventType::PushRules) @@ -295,7 +294,7 @@ pub(crate) async fn get_pushrule_actions_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); // remove old deprecated mentions push rules as per MSC4210 #[allow(deprecated)] @@ -329,7 +328,7 @@ pub(crate) async fn set_pushrule_actions_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut account_data: PushRulesEvent = services .account_data @@ -366,7 +365,7 @@ pub(crate) async fn get_pushrule_enabled_route( 
State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); // remove old deprecated mentions push rules as per MSC4210 #[allow(deprecated)] @@ -400,7 +399,7 @@ pub(crate) async fn set_pushrule_enabled_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut account_data: PushRulesEvent = services .account_data @@ -437,7 +436,7 @@ pub(crate) async fn delete_pushrule_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut account_data: PushRulesEvent = services .account_data @@ -483,7 +482,7 @@ pub(crate) async fn get_pushers_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); Ok(get_pushers::v3::Response { pushers: services.pusher.get_pushers(sender_user).await, @@ -499,7 +498,7 @@ pub(crate) async fn set_pushers_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); services .pusher diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index a8eaf91d..86d871ff 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -15,8 +15,8 @@ pub(crate) async fn redact_event_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let body = body.body; + let sender_user = body.sender_user(); + let body = &body.body; if services.users.is_suspended(sender_user).await? 
{ // TODO: Users can redact their own messages while suspended return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 5113b42f..8ece3ab1 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -38,8 +38,7 @@ pub(crate) async fn report_room_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - // user authentication - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? { return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } @@ -94,7 +93,7 @@ pub(crate) async fn report_event_route( body: Ruma, ) -> Result { // user authentication - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if services.users.is_suspended(sender_user).await? { return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } diff --git a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs index 3f0016af..a944971c 100644 --- a/src/api/client/room/aliases.rs +++ b/src/api/client/room/aliases.rs @@ -15,7 +15,7 @@ pub(crate) async fn get_room_aliases_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index d1dffc51..4ca64fd8 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -58,7 +58,7 @@ pub(crate) async fn create_room_route( ) -> Result { use create_room::v3::RoomPreset; - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services.globals.allow_room_creation() && 
body.appservice_info.is_none() @@ -174,7 +174,7 @@ pub(crate) async fn create_room_route( let content = match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => - RoomCreateEventContent::new_v1(sender_user.clone()), + RoomCreateEventContent::new_v1(sender_user.to_owned()), | _ => RoomCreateEventContent::new_v11(), }; let mut content = serde_json::from_str::( @@ -239,7 +239,7 @@ pub(crate) async fn create_room_route( | _ => RoomPreset::PrivateChat, // Room visibility should not be custom }); - let mut users = BTreeMap::from_iter([(sender_user.clone(), int!(100))]); + let mut users = BTreeMap::from_iter([(sender_user.to_owned(), int!(100))]); if preset == RoomPreset::TrustedPrivateChat { for invite in &body.invite { diff --git a/src/api/client/session.rs b/src/api/client/session.rs index 2499a43d..992073c6 100644 --- a/src/api/client/session.rs +++ b/src/api/client/session.rs @@ -269,11 +269,9 @@ pub(crate) async fn login_token_route( return Err!(Request(Forbidden("Login via an existing session is not enabled"))); } - let sender_user = body.sender_user(); - let sender_device = body.sender_device(); - // This route SHOULD have UIA // TODO: How do we make only UIA sessions that have not been used before valid? 
+ let (sender_user, sender_device) = body.sender(); let mut uiaainfo = uiaa::UiaaInfo { flows: vec![uiaa::AuthFlow { stages: vec![uiaa::AuthType::Password] }], @@ -335,12 +333,9 @@ pub(crate) async fn logout_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - services .users - .remove_device(sender_user, sender_device) + .remove_device(body.sender_user(), body.sender_device()) .await; Ok(logout::v3::Response::new()) @@ -365,12 +360,10 @@ pub(crate) async fn logout_all_route( InsecureClientIp(client): InsecureClientIp, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services .users - .all_device_ids(sender_user) - .for_each(|device_id| services.users.remove_device(sender_user, device_id)) + .all_device_ids(body.sender_user()) + .for_each(|device_id| services.users.remove_device(body.sender_user(), device_id)) .await; Ok(logout_all::v3::Response::new()) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 07802b1b..cf371728 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -77,7 +77,7 @@ pub(crate) async fn get_state_events_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if !services .rooms diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index caafe10d..dd799105 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -21,7 +21,7 @@ pub(crate) async fn update_tag_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut tags_event = services .account_data @@ -58,7 +58,7 @@ pub(crate) async fn delete_tag_route( 
State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let mut tags_event = services .account_data @@ -92,7 +92,7 @@ pub(crate) async fn get_tags_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let tags_event = services .account_data diff --git a/src/api/client/to_device.rs b/src/api/client/to_device.rs index 8ad9dc99..581f4a72 100644 --- a/src/api/client/to_device.rs +++ b/src/api/client/to_device.rs @@ -21,7 +21,7 @@ pub(crate) async fn send_event_to_device_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); let sender_device = body.sender_device.as_deref(); // Check if this is a new transaction id @@ -47,7 +47,7 @@ pub(crate) async fn send_event_to_device_route( serde_json::to_writer( &mut buf, &federation::transactions::edu::Edu::DirectToDevice(DirectDeviceContent { - sender: sender_user.clone(), + sender: sender_user.to_owned(), ev_type: body.event_type.clone(), message_id: count.to_string().into(), messages, diff --git a/src/api/client/unstable.rs b/src/api/client/unstable.rs index e21eaf21..08f70975 100644 --- a/src/api/client/unstable.rs +++ b/src/api/client/unstable.rs @@ -69,7 +69,7 @@ pub(crate) async fn delete_timezone_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); @@ -97,7 +97,7 @@ pub(crate) async fn set_timezone_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); @@ -125,7 +125,7 @@ pub(crate) async fn set_profile_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); @@ -218,7 +218,7 @@ pub(crate) async fn delete_profile_key_route( State(services): State, body: Ruma, ) -> Result { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user(); if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); From 2051c22a281b6daeda252e75c1332d26edd6f48f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Mon, 28 Apr 2025 01:32:13 +0000 Subject: [PATCH 071/270] Support optional device_id's in lazy-loading context. 
Co-authored-by: Jade Ellis Signed-off-by: Jason Volk --- src/api/client/context.rs | 2 +- src/api/client/message.rs | 28 +++++++++++++-------------- src/api/client/sync/v3.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 4 ++-- 4 files changed, 17 insertions(+), 19 deletions(-) diff --git a/src/api/client/context.rs b/src/api/client/context.rs index dbc2a22f..ca787a16 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -111,7 +111,7 @@ pub(crate) async fn get_context_route( let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: sender_device, + device_id: Some(sender_device), room_id, token: Some(base_count.into_unsigned()), options: Some(&filter.lazy_load_options), diff --git a/src/api/client/message.rs b/src/api/client/message.rs index e442850b..7a87a9b0 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -1,5 +1,3 @@ -use core::panic; - use axum::extract::State; use conduwuit::{ Err, Result, at, @@ -34,6 +32,7 @@ use ruma::{ }, serde::Raw, }; +use tracing::warn; use crate::Ruma; @@ -73,7 +72,7 @@ pub(crate) async fn get_message_events_route( ) -> Result { debug_assert!(IGNORED_MESSAGE_TYPES.is_sorted(), "IGNORED_MESSAGE_TYPES is not sorted"); let sender_user = body.sender_user(); - let sender_device = body.sender_device.as_ref(); + let sender_device = body.sender_device.as_deref(); let room_id = &body.room_id; let filter = &body.filter; @@ -137,18 +136,17 @@ pub(crate) async fn get_message_events_route( let lazy_loading_context = lazy_loading::Context { user_id: sender_user, - device_id: match sender_device { - | Some(device_id) => device_id, - | None => - if let Some(registration) = body.appservice_info.as_ref() { - <&DeviceId>::from(registration.registration.id.as_str()) - } else { - panic!( - "No device_id provided and no appservice registration found, this \ - should be unreachable" - ); - }, - }, + device_id: sender_device.or_else(|| { + if let Some(registration) = 
body.appservice_info.as_ref() { + Some(<&DeviceId>::from(registration.registration.id.as_str())) + } else { + warn!( + "No device_id provided and no appservice registration found, this should be \ + unreachable" + ); + None + } + }), room_id, token: Some(from.into_unsigned()), options: Some(&filter.lazy_load_options), diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 7bc74c95..feaf8689 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -645,7 +645,7 @@ async fn load_joined_room( let lazy_loading_context = &lazy_loading::Context { user_id: sender_user, - device_id: sender_device, + device_id: Some(sender_device), room_id, token: Some(since), options: Some(&filter.room.state.lazy_load_options), diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 346314d1..61f081a9 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -27,7 +27,7 @@ pub trait Options: Send + Sync { #[derive(Clone, Debug)] pub struct Context<'a> { pub user_id: &'a UserId, - pub device_id: &'a DeviceId, + pub device_id: Option<&'a DeviceId>, pub room_id: &'a RoomId, pub token: Option, pub options: Option<&'a LazyLoadOptions>, @@ -40,7 +40,7 @@ pub enum Status { } pub type Witness = HashSet; -type Key<'a> = (&'a UserId, &'a DeviceId, &'a RoomId, &'a UserId); +type Key<'a> = (&'a UserId, Option<&'a DeviceId>, &'a RoomId, &'a UserId); impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { From f3dd90df39da1d5ff1e4aabc150990efe5e64f5e Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 08:23:57 +0000 Subject: [PATCH 072/270] Mitigate large futures Signed-off-by: Jason Volk --- src/admin/room/moderation.rs | 12 +++++++++--- src/admin/user/commands.rs | 6 ++++-- src/api/client/account.rs | 6 ++++-- src/api/client/membership.rs | 12 +++++++++--- src/api/client/state.rs | 3 ++- src/service/admin/mod.rs | 3 +-- 6 files changed, 29 
insertions(+), 13 deletions(-) diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 921249bd..5fb5bb3e 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -5,7 +5,7 @@ use conduwuit::{ utils::{IterStream, ReadyExt}, warn, }; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId}; use crate::{admin_command, admin_command_dispatch, get_room_info}; @@ -132,7 +132,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { evicting admins too)", ); - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + if let Err(e) = leave_room(self.services, user_id, &room_id, None) + .boxed() + .await + { warn!("Failed to leave room: {e}"); } @@ -297,7 +300,10 @@ async fn ban_list_of_rooms(&self) -> Result { evicting admins too)", ); - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + if let Err(e) = leave_room(self.services, user_id, &room_id, None) + .boxed() + .await + { warn!("Failed to leave room: {e}"); } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index d094fc5f..89f7a9fc 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -8,7 +8,7 @@ use conduwuit::{ warn, }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; -use futures::StreamExt; +use futures::{FutureExt, StreamExt}; use ruma::{ OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ @@ -696,7 +696,9 @@ pub(super) async fn force_leave_room( return Err!("{user_id} is not joined in the room"); } - leave_room(self.services, &user_id, &room_id, None).await?; + leave_room(self.services, &user_id, &room_id, None) + .boxed() + .await?; self.write_str(&format!("{user_id} has left {room_id}.",)) .await diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 30f8b89c..2e822f02 100644 --- 
a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -777,7 +777,9 @@ pub(crate) async fn deactivate_route( super::update_displayname(&services, sender_user, None, &all_joined_rooms).await; super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await; - full_user_deactivate(&services, sender_user, &all_joined_rooms).await?; + full_user_deactivate(&services, sender_user, &all_joined_rooms) + .boxed() + .await?; info!("User {sender_user} deactivated their account."); @@ -929,7 +931,7 @@ pub async fn full_user_deactivate( } } - super::leave_all_rooms(services, user_id).await; + super::leave_all_rooms(services, user_id).boxed().await; Ok(()) } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b5356b94..85d0cd21 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -114,7 +114,9 @@ async fn banned_room_check( .collect() .await; - full_user_deactivate(services, user_id, &all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; } return Err!(Request(Forbidden("This room is banned on this homeserver."))); @@ -153,7 +155,9 @@ async fn banned_room_check( .collect() .await; - full_user_deactivate(services, user_id, &all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; } return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); @@ -265,6 +269,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( room_id.server_name(), client, ) + .boxed() .await?; let mut servers = body.via.clone(); @@ -487,6 +492,7 @@ pub(crate) async fn leave_room_route( body: Ruma, ) -> Result { leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .boxed() .await .map(|()| leave_room::v3::Response::new()) } @@ -1825,7 +1831,7 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { for room_id in all_rooms { // ignore errors - if let Err(e) 
= leave_room(services, user_id, &room_id, None).await { + if let Err(e) = leave_room(services, user_id, &room_id, None).boxed().await { warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index cf371728..c0303200 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -5,7 +5,7 @@ use conduwuit::{ utils::BoolExt, }; use conduwuit_service::Services; -use futures::TryStreamExt; +use futures::{FutureExt, TryStreamExt}; use ruma::{ OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, @@ -63,6 +63,7 @@ pub(crate) async fn send_state_event_for_empty_key_route( body: Ruma, ) -> Result> { send_state_event_for_key_route(State(services), body) + .boxed() .await .map(RumaResponse) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 11d93cc2..19a523ca 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -4,7 +4,6 @@ mod execute; mod grant; use std::{ - future::Future, pin::Pin, sync::{Arc, RwLock as StdRwLock, Weak}, }; @@ -14,7 +13,7 @@ use conduwuit::{ Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; -use futures::{FutureExt, TryFutureExt}; +use futures::{Future, FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, UserId, From 732a77f3a8f1e02c6a436604792c5377ff4d4bf9 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 23:01:21 +0000 Subject: [PATCH 073/270] Use integrated error instead of panic on some legacy codepaths Signed-off-by: Jason Volk --- src/api/client/account.rs | 6 +-- src/api/client/capabilities.rs | 16 +++---- src/api/client/push.rs | 86 ++++++++++++---------------------- src/api/client/read_marker.rs | 4 +- src/api/client/room/create.rs | 22 +++------ src/api/client/tag.rs | 4 +- 6 files changed, 49 insertions(+), 89 deletions(-) diff 
--git a/src/api/client/account.rs b/src/api/client/account.rs index 2e822f02..b6ff0f2b 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -351,8 +351,7 @@ pub(crate) async fn register_route( if !services.globals.new_user_displayname_suffix().is_empty() && body.appservice_info.is_none() { - write!(displayname, " {}", services.server.config.new_user_displayname_suffix) - .expect("should be able to write to string buffer"); + write!(displayname, " {}", services.server.config.new_user_displayname_suffix)?; } services @@ -370,8 +369,7 @@ pub(crate) async fn register_route( content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), }, - }) - .expect("to json always works"), + })?, ) .await?; diff --git a/src/api/client/capabilities.rs b/src/api/client/capabilities.rs index 7362c4f9..c42c6dfd 100644 --- a/src/api/client/capabilities.rs +++ b/src/api/client/capabilities.rs @@ -26,8 +26,8 @@ pub(crate) async fn get_capabilities_route( let mut capabilities = Capabilities::default(); capabilities.room_versions = RoomVersionsCapability { - default: services.server.config.default_room_version.clone(), available, + default: services.server.config.default_room_version.clone(), }; // we do not implement 3PID stuff @@ -38,16 +38,12 @@ pub(crate) async fn get_capabilities_route( }; // MSC4133 capability - capabilities - .set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true})) - .expect("this is valid JSON we created"); + capabilities.set("uk.tcpip.msc4133.profile_fields", json!({"enabled": true}))?; - capabilities - .set( - "org.matrix.msc4267.forget_forced_upon_leave", - json!({"enabled": services.config.forget_forced_upon_leave}), - ) - .expect("valid JSON we created"); + capabilities.set( + "org.matrix.msc4267.forget_forced_upon_leave", + json!({"enabled": services.config.forget_forced_upon_leave}), + )?; Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/api/client/push.rs 
b/src/api/client/push.rs index 125b26bb..74e29422 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -79,17 +79,14 @@ pub(crate) async fn get_pushrules_all_route( global_ruleset.update_with_server_default(Ruleset::server_default(sender_user)); + let ty = GlobalAccountDataEventType::PushRules; + let event = PushRulesEvent { + content: PushRulesEventContent { global: global_ruleset.clone() }, + }; + services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { global: global_ruleset.clone() }, - }) - .expect("to json always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?) .await?; } }; @@ -118,19 +115,17 @@ pub(crate) async fn get_pushrules_global_route( else { // user somehow has non-existent push rule event. recreate it and return server // default silently + + let ty = GlobalAccountDataEventType::PushRules; + let event = PushRulesEvent { + content: PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }; + services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?) .await?; return Ok(get_pushrules_global_scope::v3::Response { @@ -274,14 +269,10 @@ pub(crate) async fn set_pushrule_route( return Err(err); } + let ty = GlobalAccountDataEventType::PushRules; services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?) 
.await?; Ok(set_pushrule::v3::Response {}) @@ -345,14 +336,10 @@ pub(crate) async fn set_pushrule_actions_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); } + let ty = GlobalAccountDataEventType::PushRules; services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?) .await?; Ok(set_pushrule_actions::v3::Response {}) @@ -416,14 +403,10 @@ pub(crate) async fn set_pushrule_enabled_route( return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); } + let ty = GlobalAccountDataEventType::PushRules; services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?) .await?; Ok(set_pushrule_enabled::v3::Response {}) @@ -462,14 +445,10 @@ pub(crate) async fn delete_pushrule_route( return Err(err); } + let ty = GlobalAccountDataEventType::PushRules; services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(account_data).expect("to json value always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(account_data)?) 
.await?; Ok(delete_pushrule::v3::Response {}) @@ -514,19 +493,16 @@ async fn recreate_push_rules_and_return( services: &Services, sender_user: &ruma::UserId, ) -> Result { + let ty = GlobalAccountDataEventType::PushRules; + let event = PushRulesEvent { + content: PushRulesEventContent { + global: Ruleset::server_default(sender_user), + }, + }; + services .account_data - .update( - None, - sender_user, - GlobalAccountDataEventType::PushRules.to_string().into(), - &serde_json::to_value(PushRulesEvent { - content: PushRulesEventContent { - global: Ruleset::server_default(sender_user), - }, - }) - .expect("to json always works"), - ) + .update(None, sender_user, ty.to_string().into(), &serde_json::to_value(event)?) .await?; Ok(get_pushrules_all::v3::Response { diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index e152869c..9d813294 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -37,7 +37,7 @@ pub(crate) async fn set_read_marker_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), + &serde_json::to_value(fully_read_event)?, ) .await?; } @@ -151,7 +151,7 @@ pub(crate) async fn create_receipt_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), + &serde_json::to_value(fully_read_event)?, ) .await?; }, diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 4ca64fd8..aa54e1e9 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -177,18 +177,10 @@ pub(crate) async fn create_room_route( RoomCreateEventContent::new_v1(sender_user.to_owned()), | _ => RoomCreateEventContent::new_v11(), }; - let mut content = serde_json::from_str::( - to_raw_value(&content) - .expect("we just created this as content was None") - .get(), - ) - .unwrap(); - content.insert( - 
"room_version".into(), - json!(room_version.as_str()) - .try_into() - .expect("we just created this as content was None"), - ); + let mut content = + serde_json::from_str::(to_raw_value(&content)?.get()) + .unwrap(); + content.insert("room_version".into(), json!(room_version.as_str()).try_into()?); content }, }; @@ -200,8 +192,7 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomCreate, - content: to_raw_value(&create_content) - .expect("create event content serialization"), + content: to_raw_value(&create_content)?, state_key: Some(StateKey::new()), ..Default::default() }, @@ -267,8 +258,7 @@ pub(crate) async fn create_room_route( .build_and_append_pdu( PduBuilder { event_type: TimelineEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content) - .expect("serialized power_levels event content"), + content: to_raw_value(&power_levels_content)?, state_key: Some(StateKey::new()), ..Default::default() }, diff --git a/src/api/client/tag.rs b/src/api/client/tag.rs index dd799105..68105e4f 100644 --- a/src/api/client/tag.rs +++ b/src/api/client/tag.rs @@ -42,7 +42,7 @@ pub(crate) async fn update_tag_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), + &serde_json::to_value(tags_event)?, ) .await?; @@ -76,7 +76,7 @@ pub(crate) async fn delete_tag_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &serde_json::to_value(tags_event).expect("to json value always works"), + &serde_json::to_value(tags_event)?, ) .await?; From 21bbee8e3cd7c982e2fbc38aeebf8f387da05165 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 23:04:58 +0000 Subject: [PATCH 074/270] Simplify api to send notices to admin room Signed-off-by: Jason Volk --- src/api/client/account.rs | 60 +++++++++++------------------------ src/api/client/room/create.rs | 14 ++++---- src/service/admin/mod.rs | 7 ++++ 3 
files changed, 32 insertions(+), 49 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index b6ff0f2b..27d93bef 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -26,10 +26,7 @@ use ruma::{ }, events::{ GlobalAccountDataEventType, StateEventType, - room::{ - message::RoomMessageEventContent, - power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, + room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, }, push, }; @@ -414,32 +411,21 @@ pub(crate) async fn register_route( // log in conduit admin channel if a non-guest user registered if body.appservice_info.is_none() && !is_guest { if !device_display_name.is_empty() { - info!( - "New user \"{user_id}\" registered on this server with device display name: \ - \"{device_display_name}\"" + let notice = format!( + "New user \"{user_id}\" registered on this server from IP {client} and device \ + display name \"{device_display_name}\"" ); + info!("{notice}"); if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "New user \"{user_id}\" registered on this server from IP {client} and \ - device display name \"{device_display_name}\"" - ))) - .await - .ok(); + services.admin.notice(¬ice).await; } } else { - info!("New user \"{user_id}\" registered on this server."); + let notice = format!("New user \"{user_id}\" registered on this server."); + info!("{notice}"); if services.server.config.admin_room_notices { - services - .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "New user \"{user_id}\" registered on this server from IP {client}" - ))) - .await - .ok(); + services.admin.notice(¬ice).await; } } } @@ -452,24 +438,22 @@ pub(crate) async fn register_route( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::notice_plain(format!( + .notice(&format!( "Guest user \"{user_id}\" with device display 
name \ \"{device_display_name}\" registered on this server from IP {client}" - ))) - .await - .ok(); + )) + .await; } } else { #[allow(clippy::collapsible_else_if)] if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::notice_plain(format!( + .notice(&format!( "Guest user \"{user_id}\" with no device display name registered on \ this server from IP {client}", - ))) - .await - .ok(); + )) + .await; } } } @@ -677,11 +661,8 @@ pub(crate) async fn change_password_route( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} changed their password." - ))) - .await - .ok(); + .notice(&format!("User {sender_user} changed their password.")) + .await; } Ok(change_password::v3::Response {}) @@ -784,11 +765,8 @@ pub(crate) async fn deactivate_route( if services.server.config.admin_room_notices { services .admin - .send_message(RoomMessageEventContent::notice_plain(format!( - "User {sender_user} deactivated their account." 
- ))) - .await - .ok(); + .notice(&format!("User {sender_user} deactivated their account.")) + .await; } Ok(deactivate::v3::Response { diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index aa54e1e9..8b93fcfd 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -92,19 +92,17 @@ pub(crate) async fn create_room_route( && !services.users.is_admin(sender_user).await && body.appservice_info.is_none() { - info!( - "Non-admin user {sender_user} tried to publish {0} to the room directory while \ - \"lockdown_public_room_directory\" is enabled", - &room_id + warn!( + "Non-admin user {sender_user} tried to publish {room_id} to the room directory \ + while \"lockdown_public_room_directory\" is enabled" ); if services.server.config.admin_room_notices { services .admin - .send_text(&format!( - "Non-admin user {sender_user} tried to publish {0} to the room directory \ - while \"lockdown_public_room_directory\" is enabled", - &room_id + .notice(&format!( + "Non-admin user {sender_user} tried to publish {room_id} to the room \ + directory while \"lockdown_public_room_directory\" is enabled" )) .await; } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 19a523ca..a76c3ef6 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -142,6 +142,13 @@ impl crate::Service for Service { } impl Service { + /// Sends markdown notice to the admin room as the admin user. + pub async fn notice(&self, body: &str) { + self.send_message(RoomMessageEventContent::notice_markdown(body)) + .await + .ok(); + } + /// Sends markdown message (not an m.notice for notification reasons) to the /// admin room as the admin user. pub async fn send_text(&self, body: &str) { From 667afedd24d40db1d63fa052dbd55e872b493225 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 23:50:03 +0000 Subject: [PATCH 075/270] Macroize various remaining Error constructions. 
Signed-off-by: Jason Volk --- src/api/client/account.rs | 16 +-- src/api/client/openid.rs | 13 +-- src/api/client/profile.rs | 15 ++- src/api/client/push.rs | 6 +- src/api/client/report.rs | 32 ++---- src/api/client/room/aliases.rs | 9 +- src/api/client/room/create.rs | 100 ++++++++---------- src/api/server/invite.rs | 2 +- .../rooms/event_handler/handle_outlier_pdu.rs | 10 +- 9 files changed, 78 insertions(+), 125 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 27d93bef..14bbcf98 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -13,22 +13,14 @@ use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ - OwnedRoomId, UserId, api::client::{ account::{ - ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, - deactivate, get_3pids, get_username_availability, - register::{self, LoginType}, - request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, - whoami, + change_password, check_registration_token_validity, deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, ThirdPartyIdRemovalStatus }, uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, - events::{ - GlobalAccountDataEventType, StateEventType, - room::power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, - }, - push, + }, events::{ + room::{message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}}, GlobalAccountDataEventType, StateEventType + }, push, OwnedRoomId, UserId }; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; diff --git a/src/api/client/openid.rs b/src/api/client/openid.rs index e27b3ab8..0390b4b3 100644 --- a/src/api/client/openid.rs +++ b/src/api/client/openid.rs @@ -1,11 +1,8 @@ use std::time::Duration; use axum::extract::State; -use 
conduwuit::{Error, Result, utils}; -use ruma::{ - api::client::{account, error::ErrorKind}, - authentication::TokenType, -}; +use conduwuit::{Err, Result, utils}; +use ruma::{api::client::account, authentication::TokenType}; use super::TOKEN_LENGTH; use crate::Ruma; @@ -22,14 +19,12 @@ pub(crate) async fn create_openid_token_route( let sender_user = body.sender_user(); if sender_user != body.user_id { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Not allowed to request OpenID tokens on behalf of other users", - )); + ))); } let access_token = utils::random_string(TOKEN_LENGTH); - let expires_in = services .users .create_openid_token(&body.user_id, &access_token)?; diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 76b5dc6d..6efad64e 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Error, Result, + Err, Result, matrix::pdu::PduBuilder, utils::{IterStream, stream::TryIgnore}, warn, @@ -12,11 +12,8 @@ use futures::{StreamExt, TryStreamExt, future::join3}; use ruma::{ OwnedMxcUri, OwnedRoomId, UserId, api::{ - client::{ - error::ErrorKind, - profile::{ - get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, - }, + client::profile::{ + get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, federation, }, @@ -110,7 +107,7 @@ pub(crate) async fn get_displayname_route( if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + return Err!(Request(NotFound("Profile was not found."))); } Ok(get_display_name::v3::Response { @@ -214,7 +211,7 @@ pub(crate) async fn get_avatar_url_route( if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and 
we couldn't fetch it over // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + return Err!(Request(NotFound("Profile was not found."))); } Ok(get_avatar_url::v3::Response { @@ -287,7 +284,7 @@ pub(crate) async fn get_profile_route( if !services.users.exists(&body.user_id).await { // Return 404 if this user doesn't exist and we couldn't fetch it over // federation - return Err(Error::BadRequest(ErrorKind::NotFound, "Profile was not found.")); + return Err!(Request(NotFound("Profile was not found."))); } let mut custom_profile_fields: BTreeMap = services diff --git a/src/api/client/push.rs b/src/api/client/push.rs index 74e29422..d8d84ec7 100644 --- a/src/api/client/push.rs +++ b/src/api/client/push.rs @@ -218,7 +218,7 @@ pub(crate) async fn get_pushrule_route( if let Some(rule) = rule { Ok(get_pushrule::v3::Response { rule }) } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")) + Err!(Request(NotFound("Push rule not found."))) } } @@ -333,7 +333,7 @@ pub(crate) async fn set_pushrule_actions_route( .set_actions(body.kind.clone(), &body.rule_id, body.actions.clone()) .is_err() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); + return Err!(Request(NotFound("Push rule not found."))); } let ty = GlobalAccountDataEventType::PushRules; @@ -400,7 +400,7 @@ pub(crate) async fn set_pushrule_enabled_route( .set_enabled(body.kind.clone(), &body.rule_id, body.enabled) .is_err() { - return Err(Error::BadRequest(ErrorKind::NotFound, "Push rule not found.")); + return Err!(Request(NotFound("Push rule not found."))); } let ty = GlobalAccountDataEventType::PushRules; diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 8ece3ab1..1019b358 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -2,7 +2,7 @@ use std::{fmt::Write as _, ops::Mul, time::Duration}; use axum::extract::State; use axum_client_ip::InsecureClientIp; -use conduwuit::{Err, Error, 
Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; +use conduwuit::{Err, Result, debug_info, info, matrix::pdu::PduEvent, utils::ReadyExt}; use conduwuit_service::Services; use rand::Rng; use ruma::{ @@ -44,9 +44,8 @@ pub(crate) async fn report_room_route( } if body.reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 750 characters or fewer", + return Err!(Request( + InvalidParam("Reason too long, should be 750 characters or fewer",) )); } @@ -149,9 +148,8 @@ pub(crate) async fn report_user_route( } if body.reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 750 characters or fewer", + return Err!(Request( + InvalidParam("Reason too long, should be 750 characters or fewer",) )); } @@ -204,23 +202,16 @@ async fn is_event_report_valid( ); if room_id != pdu.room_id { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Event ID does not belong to the reported room", - )); + return Err!(Request(NotFound("Event ID does not belong to the reported room",))); } if score.is_some_and(|s| s > int!(0) || s < int!(-100)) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Invalid score, must be within 0 to -100", - )); + return Err!(Request(InvalidParam("Invalid score, must be within 0 to -100",))); } if reason.as_ref().is_some_and(|s| s.len() > 750) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Reason too long, should be 750 characters or fewer", + return Err!(Request( + InvalidParam("Reason too long, should be 750 characters or fewer",) )); } @@ -231,10 +222,7 @@ async fn is_event_report_valid( .ready_any(|user_id| user_id == sender_user) .await { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "You are not in the room you are reporting.", - )); + return Err!(Request(NotFound("You are not in the room you are reporting.",))); } Ok(()) diff --git 
a/src/api/client/room/aliases.rs b/src/api/client/room/aliases.rs index a944971c..0b072b74 100644 --- a/src/api/client/room/aliases.rs +++ b/src/api/client/room/aliases.rs @@ -1,7 +1,7 @@ use axum::extract::State; -use conduwuit::{Error, Result}; +use conduwuit::{Err, Result}; use futures::StreamExt; -use ruma::api::client::{error::ErrorKind, room::aliases}; +use ruma::api::client::room::aliases; use crate::Ruma; @@ -23,10 +23,7 @@ pub(crate) async fn get_room_aliases_route( .user_can_see_state_events(sender_user, &body.room_id) .await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "You don't have permission to view this room.", - )); + return Err!(Request(Forbidden("You don't have permission to view this room.",))); } Ok(aliases::v3::Response { diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index 8b93fcfd..238691d1 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Error, Result, debug_info, debug_warn, err, error, info, + Err, Result, debug_info, debug_warn, err, info, matrix::{StateKey, pdu::PduBuilder}, warn, }; @@ -10,10 +10,7 @@ use conduwuit_service::{Services, appservice::RegistrationInfo}; use futures::FutureExt; use ruma::{ CanonicalJsonObject, Int, OwnedRoomAliasId, OwnedRoomId, OwnedUserId, RoomId, RoomVersionId, - api::client::{ - error::ErrorKind, - room::{self, create_room}, - }, + api::client::room::{self, create_room}, events::{ TimelineEventType, room::{ @@ -64,10 +61,7 @@ pub(crate) async fn create_room_route( && body.appservice_info.is_none() && !services.users.is_admin(sender_user).await { - return Err(Error::BadRequest( - ErrorKind::forbidden(), - "Room creation has been disabled.", - )); + return Err!(Request(Forbidden("Room creation has been disabled.",))); } if services.users.is_suspended(sender_user).await? 
{ @@ -81,10 +75,7 @@ pub(crate) async fn create_room_route( // check if room ID doesn't already exist instead of erroring on auth check if services.rooms.short.get_shortroomid(&room_id).await.is_ok() { - return Err(Error::BadRequest( - ErrorKind::RoomInUse, - "Room with that custom room ID already exists", - )); + return Err!(Request(RoomInUse("Room with that custom room ID already exists",))); } if body.visibility == room::Visibility::Public @@ -127,10 +118,9 @@ pub(crate) async fn create_room_route( if services.server.supported_room_version(&room_version) { room_version } else { - return Err(Error::BadRequest( - ErrorKind::UnsupportedRoomVersion, - "This server does not support that room version.", - )); + return Err!(Request(UnsupportedRoomVersion( + "This server does not support that room version." + ))); }, | None => services.server.config.default_room_version.clone(), }; @@ -142,16 +132,17 @@ pub(crate) async fn create_room_route( let mut content = content .deserialize_as::() .map_err(|e| { - error!("Failed to deserialise content as canonical JSON: {}", e); - Error::bad_database("Failed to deserialise content as canonical JSON.") + err!(Request(BadJson(error!( + "Failed to deserialise content as canonical JSON: {e}" + )))) })?; + match room_version { | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { content.insert( "creator".into(), json!(&sender_user).try_into().map_err(|e| { - info!("Invalid creation content: {e}"); - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + err!(Request(BadJson(debug_error!("Invalid creation content: {e}")))) })?, ); }, @@ -161,9 +152,9 @@ pub(crate) async fn create_room_route( } content.insert( "room_version".into(), - json!(room_version.as_str()).try_into().map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") - })?, + json!(room_version.as_str()) + .try_into() + .map_err(|e| err!(Request(BadJson("Invalid creation content: {e}"))))?, ); content }, @@ -345,8 +336,7 @@ pub(crate) 
async fn create_room_route( // 6. Events listed in initial_state for event in &body.initial_state { let mut pdu_builder = event.deserialize_as::().map_err(|e| { - warn!("Invalid initial state event: {:?}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") + err!(Request(InvalidParam(warn!("Invalid initial state event: {e:?}")))) })?; debug_info!("Room creation initial state event: {event:?}"); @@ -355,7 +345,7 @@ pub(crate) async fn create_room_route( // state event in there with the content of literally `{}` (not null or empty // string), let's just skip it over and warn. if pdu_builder.content.get().eq("{}") { - info!("skipping empty initial state event with content of `{{}}`: {event:?}"); + debug_warn!("skipping empty initial state event with content of `{{}}`: {event:?}"); debug_warn!("content: {}", pdu_builder.content.get()); continue; } @@ -502,9 +492,7 @@ fn default_power_levels_content( if let Some(power_level_content_override) = power_level_content_override { let json: JsonObject = serde_json::from_str(power_level_content_override.json().get()) - .map_err(|_| { - Error::BadRequest(ErrorKind::BadJson, "Invalid power_level_content_override.") - })?; + .map_err(|e| err!(Request(BadJson("Invalid power_level_content_override: {e:?}"))))?; for (key, value) in json { power_levels_content[key] = value; @@ -522,16 +510,14 @@ async fn room_alias_check( ) -> Result { // Basic checks on the room alias validity if room_alias_name.contains(':') { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Room alias contained `:` which is not allowed. 
Please note that this expects a \ localpart, not the full room alias.", - )); + ))); } else if room_alias_name.contains(char::is_whitespace) { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Room alias contained spaces which is not a valid room alias.", - )); + ))); } // check if room alias is forbidden @@ -540,7 +526,7 @@ async fn room_alias_check( .forbidden_alias_names() .is_match(room_alias_name) { - return Err(Error::BadRequest(ErrorKind::Unknown, "Room alias name is forbidden.")); + return Err!(Request(Unknown("Room alias name is forbidden."))); } let server_name = services.globals.server_name(); @@ -560,25 +546,19 @@ async fn room_alias_check( .await .is_ok() { - return Err(Error::BadRequest(ErrorKind::RoomInUse, "Room alias already exists.")); + return Err!(Request(RoomInUse("Room alias already exists."))); } if let Some(info) = appservice_info { if !info.aliases.is_match(full_room_alias.as_str()) { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias is not in namespace.", - )); + return Err!(Request(Exclusive("Room alias is not in namespace."))); } } else if services .appservice .is_exclusive_alias(&full_room_alias) .await { - return Err(Error::BadRequest( - ErrorKind::Exclusive, - "Room alias reserved by appservice.", - )); + return Err!(Request(Exclusive("Room alias reserved by appservice.",))); } debug_info!("Full room alias: {full_room_alias}"); @@ -594,24 +574,33 @@ fn custom_room_id_check(services: &Services, custom_room_id: &str) -> Result Result( v.insert(auth_event); }, | hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, + return Err!(Request(InvalidParam( "Auth event's type and state_key combination exists multiple times.", - )); + ))); }, } } From 3d0360bcd65ad8842120f305dfd1905e36868099 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Apr 2025 00:17:39 +0000 Subject: [PATCH 076/270] Dedup and parallelize current key backup count 
and etag fetching. Signed-off-by: Jason Volk --- src/api/client/backup.rs | 134 +++++++++++++-------------------------- 1 file changed, 43 insertions(+), 91 deletions(-) diff --git a/src/api/client/backup.rs b/src/api/client/backup.rs index 2ad37cf3..a3038f26 100644 --- a/src/api/client/backup.rs +++ b/src/api/client/backup.rs @@ -2,8 +2,10 @@ use std::cmp::Ordering; use axum::extract::State; use conduwuit::{Err, Result, err}; +use conduwuit_service::Services; +use futures::{FutureExt, future::try_join}; use ruma::{ - UInt, + UInt, UserId, api::client::backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, create_backup_version, delete_backup_keys, delete_backup_keys_for_room, @@ -58,21 +60,9 @@ pub(crate) async fn get_latest_backup_info_route( .await .map_err(|_| err!(Request(NotFound("Key backup does not exist."))))?; - Ok(get_latest_backup_info::v3::Response { - algorithm, - count: (UInt::try_from( - services - .key_backups - .count_keys(body.sender_user(), &version) - .await, - ) - .expect("user backup keys count should not be that high")), - etag: services - .key_backups - .get_etag(body.sender_user(), &version) - .await, - version, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &version).await?; + + Ok(get_latest_backup_info::v3::Response { algorithm, count, etag, version }) } /// # `GET /_matrix/client/v3/room_keys/version/{version}` @@ -90,17 +80,12 @@ pub(crate) async fn get_backup_info_route( err!(Request(NotFound("Key backup does not exist at version {:?}", body.version))) })?; + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + Ok(get_backup_info::v3::Response { algorithm, - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, + count, + etag, version: body.version.clone(), }) } @@ -155,17 +140,9 @@ pub(crate) async fn 
add_backup_keys_route( } } - Ok(add_backup_keys::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + + Ok(add_backup_keys::v3::Response { count, etag }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` @@ -198,17 +175,9 @@ pub(crate) async fn add_backup_keys_for_room_route( .await?; } - Ok(add_backup_keys_for_room::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + + Ok(add_backup_keys_for_room::v3::Response { count, etag }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -306,17 +275,9 @@ pub(crate) async fn add_backup_keys_for_session_route( .await?; } - Ok(add_backup_keys_for_session::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + + Ok(add_backup_keys_for_session::v3::Response { count, etag }) } /// # `GET /_matrix/client/r0/room_keys/keys` @@ -379,17 +340,9 @@ pub(crate) async fn delete_backup_keys_route( .delete_all_keys(body.sender_user(), &body.version) .await; - Ok(delete_backup_keys::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), 
&body.version).await?; + + Ok(delete_backup_keys::v3::Response { count, etag }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` @@ -404,17 +357,9 @@ pub(crate) async fn delete_backup_keys_for_room_route( .delete_room_keys(body.sender_user(), &body.version, &body.room_id) .await; - Ok(delete_backup_keys_for_room::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + + Ok(delete_backup_keys_for_room::v3::Response { count, etag }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -429,15 +374,22 @@ pub(crate) async fn delete_backup_keys_for_session_route( .delete_room_key(body.sender_user(), &body.version, &body.room_id, &body.session_id) .await; - Ok(delete_backup_keys_for_session::v3::Response { - count: services - .key_backups - .count_keys(body.sender_user(), &body.version) - .await - .try_into()?, - etag: services - .key_backups - .get_etag(body.sender_user(), &body.version) - .await, - }) + let (count, etag) = get_count_etag(&services, body.sender_user(), &body.version).await?; + + Ok(delete_backup_keys_for_session::v3::Response { count, etag }) +} + +async fn get_count_etag( + services: &Services, + sender_user: &UserId, + version: &str, +) -> Result<(UInt, String)> { + let count = services + .key_backups + .count_keys(sender_user, version) + .map(TryInto::try_into); + + let etag = services.key_backups.get_etag(sender_user, version).map(Ok); + + Ok(try_join(count, etag).await?) } From 116f85360fa17b16e8e9353b20be5e21dc87e2de Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 08:24:47 +0000 Subject: [PATCH 077/270] Toward abstracting Pdu into trait Event. 
Co-authored-by: Jade Ellis Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 11 +- src/admin/user/commands.rs | 8 +- src/api/client/context.rs | 12 +- src/api/client/membership.rs | 5 +- src/api/client/message.rs | 4 +- src/api/client/relations.rs | 4 +- src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 6 +- src/api/client/search.rs | 6 +- src/api/client/state.rs | 18 +- src/api/client/sync/v3.rs | 13 +- src/api/client/sync/v4.rs | 7 +- src/api/client/sync/v5.rs | 12 +- src/api/client/threads.rs | 7 +- src/api/server/invite.rs | 7 +- src/core/matrix/event.rs | 141 +++++++--- src/core/matrix/event/content.rs | 21 ++ src/core/matrix/event/format.rs | 219 +++++++++++++++ src/core/matrix/event/redact.rs | 86 ++++++ src/core/matrix/event/type_ext.rs | 32 +++ src/core/matrix/mod.rs | 8 +- src/core/matrix/pdu.rs | 123 +++++++-- src/core/matrix/pdu/id.rs | 1 + src/core/matrix/pdu/redact.rs | 116 +------- src/core/matrix/pdu/strip.rs | 257 ------------------ src/core/matrix/{pdu => }/state_key.rs | 3 - src/core/matrix/state_res/benches.rs | 167 ++---------- src/core/matrix/state_res/event_auth.rs | 61 +++-- src/core/matrix/state_res/mod.rs | 29 +- src/core/matrix/state_res/test_utils.rs | 192 +++---------- src/core/mod.rs | 4 +- src/service/admin/mod.rs | 21 +- src/service/pusher/mod.rs | 56 ++-- .../rooms/event_handler/handle_outlier_pdu.rs | 2 +- .../event_handler/upgrade_outlier_pdu.rs | 4 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/spaces/mod.rs | 8 +- src/service/rooms/state/mod.rs | 22 +- src/service/rooms/threads/mod.rs | 16 +- src/service/rooms/timeline/mod.rs | 7 +- src/service/sending/sender.rs | 6 +- 41 files changed, 842 insertions(+), 886 deletions(-) create mode 100644 src/core/matrix/event/content.rs create mode 100644 src/core/matrix/event/format.rs create mode 100644 src/core/matrix/event/redact.rs create mode 100644 src/core/matrix/event/type_ext.rs delete mode 100644 src/core/matrix/pdu/strip.rs rename 
src/core/matrix/{pdu => }/state_key.rs (67%) diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index a397e0fc..2323e3b8 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -7,7 +7,10 @@ use std::{ use conduwuit::{ Err, Result, debug_error, err, info, - matrix::pdu::{PduEvent, PduId, RawPduId}, + matrix::{ + Event, + pdu::{PduEvent, PduId, RawPduId}, + }, trace, utils, utils::{ stream::{IterStream, ReadyExt}, @@ -19,7 +22,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedServerName, RoomId, RoomVersionId, - api::federation::event::get_room_state, + api::federation::event::get_room_state, events::AnyStateEvent, serde::Raw, }; use service::rooms::{ short::{ShortEventId, ShortRoomId}, @@ -296,12 +299,12 @@ pub(super) async fn get_remote_pdu( #[admin_command] pub(super) async fn get_room_state(&self, room: OwnedRoomOrAliasId) -> Result { let room_id = self.services.rooms.alias.resolve(&room).await?; - let room_state: Vec<_> = self + let room_state: Vec> = self .services .rooms .state_accessor .room_state_full_pdus(&room_id) - .map_ok(PduEvent::into_state_event) + .map_ok(Event::into_format) .try_collect() .await?; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 89f7a9fc..3750d758 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -1,13 +1,15 @@ use std::{collections::BTreeMap, fmt::Write as _}; -use api::client::{full_user_deactivate, join_room_by_id_helper, leave_room}; +use api::client::{ + full_user_deactivate, join_room_by_id_helper, leave_all_rooms, leave_room, update_avatar_url, + update_displayname, +}; use conduwuit::{ Err, Result, debug, debug_warn, error, info, is_equal_to, - matrix::pdu::PduBuilder, + matrix::{Event, pdu::PduBuilder}, utils::{self, ReadyExt}, warn, }; -use conduwuit_api::client::{leave_all_rooms, update_avatar_url, 
update_displayname}; use futures::{FutureExt, StreamExt}; use ruma::{ OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, diff --git a/src/api/client/context.rs b/src/api/client/context.rs index ca787a16..4a7d34d2 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -1,8 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, Result, at, debug_warn, err, - matrix::pdu::PduEvent, - ref_at, + Err, Event, Result, at, debug_warn, err, ref_at, utils::{ IterStream, future::TryExtExt, @@ -179,12 +177,12 @@ pub(crate) async fn get_context_route( .broad_filter_map(|event_id: &OwnedEventId| { services.rooms.timeline.get_pdu(event_id.as_ref()).ok() }) - .map(PduEvent::into_state_event) + .map(Event::into_format) .collect() .await; Ok(get_context::v3::Response { - event: base_event.map(at!(1)).map(PduEvent::into_room_event), + event: base_event.map(at!(1)).map(Event::into_format), start: events_before .last() @@ -203,13 +201,13 @@ pub(crate) async fn get_context_route( events_before: events_before .into_iter() .map(at!(1)) - .map(PduEvent::into_room_event) + .map(Event::into_format) .collect(), events_after: events_after .into_iter() .map(at!(1)) - .map(PduEvent::into_room_event) + .map(Event::into_format) .collect(), state, diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 85d0cd21..3c2a6fe3 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -9,7 +9,8 @@ use std::{ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, + Err, Event, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, + is_matching, matrix::{ StateKey, pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, @@ -880,7 +881,7 @@ pub(crate) async fn get_member_events_route( .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) .map(at!(1)) 
.ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) - .map(PduEvent::into_member_event) + .map(Event::into_format) .collect() .await, }) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index 7a87a9b0..e32d020f 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -175,7 +175,7 @@ pub(crate) async fn get_message_events_route( let chunk = events .into_iter() .map(at!(1)) - .map(PduEvent::into_room_event) + .map(Event::into_format) .collect(); Ok(get_message_events::v3::Response { @@ -241,7 +241,7 @@ async fn get_member_event( .rooms .state_accessor .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str()) - .map_ok(PduEvent::into_state_event) + .map_ok(Event::into_format) .await .ok() } diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index b8c2dd4d..ad726b90 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,7 +1,7 @@ use axum::extract::State; use conduwuit::{ Result, at, - matrix::pdu::PduCount, + matrix::{Event, pdu::PduCount}, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; @@ -167,7 +167,7 @@ async fn paginate_relations_with_filter( chunk: events .into_iter() .map(at!(1)) - .map(|pdu| pdu.to_message_like_event()) + .map(Event::into_format) .collect(), }) } diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 2b115b5c..47228d67 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -40,5 +40,5 @@ pub(crate) async fn get_room_event_route( event.add_age().ok(); - Ok(get_room_event::v3::Response { event: event.into_room_event() }) + Ok(get_room_event::v3::Response { event: event.into_format() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index ca63610b..8b9f3ca0 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ 
-1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, + Err, Event, Result, at, utils::{BoolExt, stream::TryTools}, }; use futures::TryStreamExt; @@ -38,7 +38,7 @@ pub(crate) async fn room_initial_sync_route( .rooms .state_accessor .room_state_full_pdus(room_id) - .map_ok(PduEvent::into_state_event) + .map_ok(Event::into_format) .try_collect() .await?; @@ -55,7 +55,7 @@ pub(crate) async fn room_initial_sync_route( chunk: events .into_iter() .map(at!(1)) - .map(PduEvent::into_room_event) + .map(Event::into_format) .collect(), }; diff --git a/src/api/client/search.rs b/src/api/client/search.rs index d4dcde57..cc745694 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ Err, Result, at, is_true, - matrix::pdu::PduEvent, + matrix::Event, result::FlatOk, utils::{IterStream, stream::ReadyExt}, }; @@ -144,7 +144,7 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() - .map(PduEvent::into_room_event) + .map(Event::into_format) .map(|result| SearchResult { rank: None, result: Some(result), @@ -185,7 +185,7 @@ async fn procure_room_state(services: &Services, room_id: &RoomId) -> Result>(); let account_data_events = services @@ -877,10 +875,7 @@ async fn load_joined_room( events: room_events, }, state: RoomState { - events: state_events - .into_iter() - .map(PduEvent::into_sync_state_event) - .collect(), + events: state_events.into_iter().map(Event::into_format).collect(), }, ephemeral: Ephemeral { events: edus }, unread_thread_notifications: BTreeMap::new(), diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index f153b2da..cabd67e4 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Err, Error, PduCount, PduEvent, Result, debug, error, extract_variant, + Err, Error, Event, PduCount, PduEvent, Result, at, debug, error, 
extract_variant, matrix::TypeStateKey, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, @@ -604,7 +604,8 @@ pub(crate) async fn sync_events_v4_route( .iter() .stream() .filter_map(|item| ignored_filter(&services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) + .map(at!(1)) + .map(Event::into_format) .collect() .await; @@ -626,7 +627,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(PduEvent::into_sync_state_event) + .map(PduEvent::into_format) .ok() }) .collect() diff --git a/src/api/client/sync/v5.rs b/src/api/client/sync/v5.rs index f3fc0f44..e4cefba0 100644 --- a/src/api/client/sync/v5.rs +++ b/src/api/client/sync/v5.rs @@ -7,11 +7,8 @@ use std::{ use axum::extract::State; use conduwuit::{ - Err, Error, Result, error, extract_variant, is_equal_to, - matrix::{ - TypeStateKey, - pdu::{PduCount, PduEvent}, - }, + Err, Error, Result, at, error, extract_variant, is_equal_to, + matrix::{Event, TypeStateKey, pdu::PduCount}, trace, utils::{ BoolExt, FutureBoolExt, IterStream, ReadyExt, TryFutureExtExt, @@ -515,7 +512,8 @@ where .iter() .stream() .filter_map(|item| ignored_filter(services, item.clone(), sender_user)) - .map(|(_, pdu)| pdu.to_sync_room_event()) + .map(at!(1)) + .map(Event::into_format) .collect() .await; @@ -537,7 +535,7 @@ where .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(PduEvent::into_sync_state_event) + .map(Event::into_format) .ok() }) .collect() diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 5b838bef..ca176eda 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,7 +1,10 @@ use axum::extract::State; use conduwuit::{ Result, at, - matrix::pdu::{PduCount, PduEvent}, + matrix::{ + Event, + pdu::{PduCount, PduEvent}, + }, }; use futures::StreamExt; use ruma::{api::client::threads::get_threads, uint}; @@ -56,7 +59,7 @@ pub(crate) async fn get_threads_route( chunk: threads 
.into_iter() .map(at!(1)) - .map(PduEvent::into_room_event) + .map(Event::into_format) .collect(), }) } diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 01961378..0d26d787 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -2,7 +2,8 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - Err, Error, PduEvent, Result, err, pdu::gen_event_id, utils, utils::hash::sha256, warn, + Err, Error, PduEvent, Result, err, matrix::Event, pdu::gen_event_id, utils, + utils::hash::sha256, warn, }; use ruma::{ CanonicalJsonValue, OwnedUserId, UserId, @@ -111,7 +112,7 @@ pub(crate) async fn create_invite_route( let pdu: PduEvent = serde_json::from_value(event.into()) .map_err(|e| err!(Request(BadJson("Invalid invite event PDU: {e}"))))?; - invite_state.push(pdu.to_stripped_state_event()); + invite_state.push(pdu.to_format()); // If we are active in the room, the remote server will notify us about the // join/invite through /send. 
If we are not in the room, we need to manually @@ -144,7 +145,7 @@ pub(crate) async fn create_invite_route( .send_appservice_request( appservice.registration.clone(), ruma::api::appservice::event::push_events::v1::Request { - events: vec![pdu.to_room_event()], + events: vec![pdu.to_format()], txn_id: general_purpose::URL_SAFE_NO_PAD .encode(sha256::hash(pdu.event_id.as_bytes())) .into(), diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index e4c478cd..5b12770b 100644 --- a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -1,63 +1,114 @@ -use ruma::{EventId, MilliSecondsSinceUnixEpoch, RoomId, UserId, events::TimelineEventType}; -use serde_json::value::RawValue as RawJsonValue; +mod content; +mod format; +mod redact; +mod type_ext; + +use ruma::{ + EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, UserId, + events::TimelineEventType, +}; +use serde::Deserialize; +use serde_json::{Value as JsonValue, value::RawValue as RawJsonValue}; + +pub use self::type_ext::TypeExt; +use super::state_key::StateKey; +use crate::Result; /// Abstraction of a PDU so users can have their own PDU types. pub trait Event { + /// Serialize into a Ruma JSON format, consuming. 
+ #[inline] + fn into_format(self) -> T + where + T: From>, + Self: Sized, + { + format::Owned(self).into() + } + + /// Serialize into a Ruma JSON format + #[inline] + fn to_format<'a, T>(&'a self) -> T + where + T: From>, + Self: Sized + 'a, + { + format::Ref(self).into() + } + + #[inline] + fn get_content_as_value(&self) -> JsonValue + where + Self: Sized, + { + content::as_value(self) + } + + #[inline] + fn get_content(&self) -> Result + where + for<'de> T: Deserialize<'de>, + Self: Sized, + { + content::get::(self) + } + + #[inline] + fn redacts_id(&self, room_version: &RoomVersionId) -> Option + where + Self: Sized, + { + redact::redacts_id(self, room_version) + } + + #[inline] + fn is_redacted(&self) -> bool + where + Self: Sized, + { + redact::is_redacted(self) + } + + fn is_owned(&self) -> bool; + + // + // Canonical properties + // + + /// All the authenticating events for this event. + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// The event's content. + fn content(&self) -> &RawJsonValue; + /// The `EventId` of this event. fn event_id(&self) -> &EventId; + /// The time of creation on the originating server. + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; + + /// The events before this event. + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + + /// If this event is a redaction event this is the event it redacts. + fn redacts(&self) -> Option<&EventId>; + /// The `RoomId` of this event. fn room_id(&self) -> &RoomId; /// The `UserId` of this event. fn sender(&self) -> &UserId; - /// The time of creation on the originating server. - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; - - /// The event type. - fn event_type(&self) -> &TimelineEventType; - - /// The event's content. - fn content(&self) -> &RawJsonValue; - /// The state key for this event. fn state_key(&self) -> Option<&str>; - /// The events before this event. - // Requires GATs to avoid boxing (and TAIT for making it convenient). 
- fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + /// The event type. + fn kind(&self) -> &TimelineEventType; - /// All the authenticating events for this event. - // Requires GATs to avoid boxing (and TAIT for making it convenient). - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + /// Metadata container; peer-trusted only. + fn unsigned(&self) -> Option<&RawJsonValue>; - /// If this event is a redaction event this is the event it redacts. - fn redacts(&self) -> Option<&EventId>; -} - -impl Event for &T { - fn event_id(&self) -> &EventId { (*self).event_id() } - - fn room_id(&self) -> &RoomId { (*self).room_id() } - - fn sender(&self) -> &UserId { (*self).sender() } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { (*self).origin_server_ts() } - - fn event_type(&self) -> &TimelineEventType { (*self).event_type() } - - fn content(&self) -> &RawJsonValue { (*self).content() } - - fn state_key(&self) -> Option<&str> { (*self).state_key() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (*self).prev_events() - } - - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { - (*self).auth_events() - } - - fn redacts(&self) -> Option<&EventId> { (*self).redacts() } + //#[deprecated] + #[inline] + fn event_type(&self) -> &TimelineEventType { self.kind() } } diff --git a/src/core/matrix/event/content.rs b/src/core/matrix/event/content.rs new file mode 100644 index 00000000..1ee7ebd2 --- /dev/null +++ b/src/core/matrix/event/content.rs @@ -0,0 +1,21 @@ +use serde::Deserialize; +use serde_json::value::Value as JsonValue; + +use super::Event; +use crate::{Result, err}; + +#[inline] +#[must_use] +pub(super) fn as_value(event: &E) -> JsonValue { + get(event).expect("Failed to represent Event content as JsonValue") +} + +#[inline] +pub(super) fn get(event: &E) -> Result +where + T: for<'de> Deserialize<'de>, + E: Event, +{ + serde_json::from_str(event.content().get()) + .map_err(|e| 
err!(Request(BadJson("Failed to deserialize content into type: {e}")))) +} diff --git a/src/core/matrix/event/format.rs b/src/core/matrix/event/format.rs new file mode 100644 index 00000000..988cf4f0 --- /dev/null +++ b/src/core/matrix/event/format.rs @@ -0,0 +1,219 @@ +use ruma::{ + events::{ + AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, + AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent, + space::child::HierarchySpaceChildEvent, + }, + serde::Raw, +}; +use serde_json::json; + +use super::{Event, redact}; + +pub struct Owned(pub(super) E); + +pub struct Ref<'a, E: Event>(pub(super) &'a E); + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let (redacts, content) = redact::copy(event); + let mut json = json!({ + "content": content, + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "sender": event.sender(), + "type": event.event_type(), + }); + + if let Some(redacts) = redacts { + json["redacts"] = json!(redacts); + } + if let Some(state_key) = event.state_key() { + json["state_key"] = json!(state_key); + } + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let (redacts, content) = redact::copy(event); + let mut json = json!({ + "content": content, + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "room_id": event.room_id(), + "sender": event.sender(), + "type": event.kind(), + }); + + if let Some(redacts) = redacts { + json["redacts"] = json!(redacts); + } + if let Some(state_key) = 
event.state_key() { + json["state_key"] = json!(state_key); + } + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let (redacts, content) = redact::copy(event); + let mut json = json!({ + "content": content, + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "room_id": event.room_id(), + "sender": event.sender(), + "type": event.kind(), + }); + + if let Some(redacts) = &redacts { + json["redacts"] = json!(redacts); + } + if let Some(state_key) = event.state_key() { + json["state_key"] = json!(state_key); + } + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let mut json = json!({ + "content": event.content(), + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "room_id": event.room_id(), + "sender": event.sender(), + "state_key": event.state_key(), + "type": event.kind(), + }); + + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let mut json = json!({ + "content": event.content(), + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "sender": event.sender(), + "state_key": 
event.state_key(), + "type": event.kind(), + }); + + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let json = json!({ + "content": event.content(), + "sender": event.sender(), + "state_key": event.state_key(), + "type": event.kind(), + }); + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let json = json!({ + "content": event.content(), + "origin_server_ts": event.origin_server_ts(), + "sender": event.sender(), + "state_key": event.state_key(), + "type": event.kind(), + }); + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} + +impl From> for Raw> { + fn from(event: Owned) -> Self { Ref(&event.0).into() } +} + +impl<'a, E: Event> From> for Raw> { + fn from(event: Ref<'a, E>) -> Self { + let event = event.0; + let mut json = json!({ + "content": event.content(), + "event_id": event.event_id(), + "origin_server_ts": event.origin_server_ts(), + "redacts": event.redacts(), + "room_id": event.room_id(), + "sender": event.sender(), + "state_key": event.state_key(), + "type": event.kind(), + }); + + if let Some(unsigned) = event.unsigned() { + json["unsigned"] = json!(unsigned); + } + + serde_json::from_value(json).expect("Failed to serialize Event value") + } +} diff --git a/src/core/matrix/event/redact.rs b/src/core/matrix/event/redact.rs new file mode 100644 index 00000000..5deac874 --- /dev/null +++ b/src/core/matrix/event/redact.rs @@ -0,0 +1,86 @@ +use ruma::{ + OwnedEventId, RoomVersionId, + events::{TimelineEventType, 
room::redaction::RoomRedactionEventContent}, +}; +use serde::Deserialize; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; + +use super::Event; + +/// Copies the `redacts` property of the event to the `content` dict and +/// vice-versa. +/// +/// This follows the specification's +/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): +/// +/// > For backwards-compatibility with older clients, servers should add a +/// > redacts property to the top level of m.room.redaction events in when +/// > serving such events over the Client-Server API. +/// +/// > For improved compatibility with newer clients, servers should add a +/// > redacts property to the content of m.room.redaction events in older +/// > room versions when serving such events over the Client-Server API. +#[must_use] +pub(super) fn copy(event: &E) -> (Option, Box) { + if *event.event_type() != TimelineEventType::RoomRedaction { + return (event.redacts().map(ToOwned::to_owned), event.content().to_owned()); + } + + let Ok(mut content) = event.get_content::() else { + return (event.redacts().map(ToOwned::to_owned), event.content().to_owned()); + }; + + if let Some(redacts) = content.redacts { + return (Some(redacts), event.content().to_owned()); + } + + if let Some(redacts) = event.redacts().map(ToOwned::to_owned) { + content.redacts = Some(redacts); + return ( + event.redacts().map(ToOwned::to_owned), + to_raw_value(&content).expect("Must be valid, we only added redacts field"), + ); + } + + (event.redacts().map(ToOwned::to_owned), event.content().to_owned()) +} + +#[must_use] +pub(super) fn is_redacted(event: &E) -> bool { + let Some(unsigned) = event.unsigned() else { + return false; + }; + + let Ok(unsigned) = ExtractRedactedBecause::deserialize(unsigned) else { + return false; + }; + + unsigned.redacted_because.is_some() +} + +#[must_use] +pub(super) fn redacts_id( + event: &E, + room_version: 
&RoomVersionId, +) -> Option { + use RoomVersionId::*; + + if *event.kind() != TimelineEventType::RoomRedaction { + return None; + } + + match *room_version { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => + event.redacts().map(ToOwned::to_owned), + | _ => + event + .get_content::() + .ok()? + .redacts, + } +} + +#[derive(Deserialize)] +struct ExtractRedactedBecause { + redacted_because: Option, +} diff --git a/src/core/matrix/event/type_ext.rs b/src/core/matrix/event/type_ext.rs new file mode 100644 index 00000000..9b824d41 --- /dev/null +++ b/src/core/matrix/event/type_ext.rs @@ -0,0 +1,32 @@ +use ruma::events::{StateEventType, TimelineEventType}; + +use super::StateKey; + +/// Convenience trait for adding event type plus state key to state maps. +pub trait TypeExt { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey); +} + +impl TypeExt for StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self, state_key.into()) + } +} + +impl TypeExt for &StateEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.clone(), state_key.into()) + } +} + +impl TypeExt for TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.into(), state_key.into()) + } +} + +impl TypeExt for &TimelineEventType { + fn with_state_key(self, state_key: impl Into) -> (StateEventType, StateKey) { + (self.clone().into(), state_key.into()) + } +} diff --git a/src/core/matrix/mod.rs b/src/core/matrix/mod.rs index 8c978173..b38d4c9a 100644 --- a/src/core/matrix/mod.rs +++ b/src/core/matrix/mod.rs @@ -2,8 +2,10 @@ pub mod event; pub mod pdu; +pub mod state_key; pub mod state_res; -pub use event::Event; -pub use pdu::{PduBuilder, PduCount, PduEvent, PduId, RawPduId, StateKey}; -pub use state_res::{EventTypeExt, RoomVersion, StateMap, TypeStateKey}; +pub use event::{Event, TypeExt as EventTypeExt}; +pub use pdu::{Pdu, 
PduBuilder, PduCount, PduEvent, PduId, RawPduId, ShortId}; +pub use state_key::StateKey; +pub use state_res::{RoomVersion, StateMap, TypeStateKey}; diff --git a/src/core/matrix/pdu.rs b/src/core/matrix/pdu.rs index 188586bd..e64baeb8 100644 --- a/src/core/matrix/pdu.rs +++ b/src/core/matrix/pdu.rs @@ -7,8 +7,6 @@ mod id; mod raw_id; mod redact; mod relation; -mod state_key; -mod strip; #[cfg(test)] mod tests; mod unsigned; @@ -27,37 +25,50 @@ pub use self::{ builder::{Builder, Builder as PduBuilder}, count::Count, event_id::*, - id::*, + id::{ShortId, *}, raw_id::*, - state_key::{ShortStateKey, StateKey}, }; -use super::Event; +use super::{Event, StateKey}; use crate::Result; /// Persistent Data Unit (Event) #[derive(Clone, Deserialize, Serialize, Debug)] pub struct Pdu { pub event_id: OwnedEventId, + pub room_id: OwnedRoomId, + pub sender: OwnedUserId, + #[serde(skip_serializing_if = "Option::is_none")] pub origin: Option, + pub origin_server_ts: UInt, + #[serde(rename = "type")] pub kind: TimelineEventType, + pub content: Box, + #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, + pub prev_events: Vec, + pub depth: UInt, + pub auth_events: Vec, + #[serde(skip_serializing_if = "Option::is_none")] pub redacts: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, + pub hashes: EventHash, - #[serde(default, skip_serializing_if = "Option::is_none")] + // BTreeMap, BTreeMap> + #[serde(default, skip_serializing_if = "Option::is_none")] pub signatures: Option>, } @@ -79,31 +90,91 @@ impl Pdu { } impl Event for Pdu { - fn event_id(&self) -> &EventId { &self.event_id } - - fn room_id(&self) -> &RoomId { &self.room_id } - - fn sender(&self) -> &UserId { &self.sender } - - fn event_type(&self) -> &TimelineEventType { &self.kind } - - fn content(&self) -> &RawJsonValue { &self.content } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - - fn 
state_key(&self) -> Option<&str> { self.state_key.as_deref() } - - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { - self.prev_events.iter().map(AsRef::as_ref) - } - + #[inline] fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { self.auth_events.iter().map(AsRef::as_ref) } + #[inline] + fn content(&self) -> &RawJsonValue { &self.content } + + #[inline] + fn event_id(&self) -> &EventId { &self.event_id } + + #[inline] + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + #[inline] + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter().map(AsRef::as_ref) + } + + #[inline] fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } + + #[inline] + fn room_id(&self) -> &RoomId { &self.room_id } + + #[inline] + fn sender(&self) -> &UserId { &self.sender } + + #[inline] + fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } + + #[inline] + fn kind(&self) -> &TimelineEventType { &self.kind } + + #[inline] + fn unsigned(&self) -> Option<&RawJsonValue> { self.unsigned.as_deref() } + + #[inline] + fn is_owned(&self) -> bool { true } +} + +impl Event for &Pdu { + #[inline] + fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.auth_events.iter().map(AsRef::as_ref) + } + + #[inline] + fn content(&self) -> &RawJsonValue { &self.content } + + #[inline] + fn event_id(&self) -> &EventId { &self.event_id } + + #[inline] + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + + #[inline] + fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + self.prev_events.iter().map(AsRef::as_ref) + } + + #[inline] + fn redacts(&self) -> Option<&EventId> { self.redacts.as_deref() } + + #[inline] + fn room_id(&self) -> &RoomId { &self.room_id } + + #[inline] + fn sender(&self) -> &UserId { &self.sender } + + #[inline] + fn state_key(&self) -> Option<&str> 
{ self.state_key.as_deref() } + + #[inline] + fn kind(&self) -> &TimelineEventType { &self.kind } + + #[inline] + fn unsigned(&self) -> Option<&RawJsonValue> { self.unsigned.as_deref() } + + #[inline] + fn is_owned(&self) -> bool { false } } /// Prevent derived equality which wouldn't limit itself to event_id diff --git a/src/core/matrix/pdu/id.rs b/src/core/matrix/pdu/id.rs index 0b23a29f..896d677b 100644 --- a/src/core/matrix/pdu/id.rs +++ b/src/core/matrix/pdu/id.rs @@ -3,6 +3,7 @@ use crate::utils::u64_from_u8x8; pub type ShortRoomId = ShortId; pub type ShortEventId = ShortId; +pub type ShortStateKey = ShortId; pub type ShortId = u64; #[derive(Clone, Copy, Debug, Eq, PartialEq)] diff --git a/src/core/matrix/pdu/redact.rs b/src/core/matrix/pdu/redact.rs index 409debfe..e6a03209 100644 --- a/src/core/matrix/pdu/redact.rs +++ b/src/core/matrix/pdu/redact.rs @@ -1,117 +1,29 @@ -use ruma::{ - OwnedEventId, RoomVersionId, - canonical_json::redact_content_in_place, - events::{TimelineEventType, room::redaction::RoomRedactionEventContent}, -}; -use serde::Deserialize; -use serde_json::{ - json, - value::{RawValue as RawJsonValue, to_raw_value}, -}; +use ruma::{RoomVersionId, canonical_json::redact_content_in_place}; +use serde_json::{json, value::to_raw_value}; -use crate::{Error, Result, implement}; - -#[derive(Deserialize)] -struct ExtractRedactedBecause { - redacted_because: Option, -} +use crate::{Error, Result, err, implement}; #[implement(super::Pdu)] pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: &Self) -> Result { self.unsigned = None; let mut content = serde_json::from_str(self.content.get()) - .map_err(|_| Error::bad_database("PDU in db has invalid content."))?; + .map_err(|e| err!(Request(BadJson("Failed to deserialize content into type: {e}"))))?; redact_content_in_place(&mut content, room_version_id, self.kind.to_string()) .map_err(|e| Error::Redaction(self.sender.server_name().to_owned(), e))?; - self.unsigned = Some( - 
to_raw_value(&json!({ - "redacted_because": serde_json::to_value(reason).expect("to_value(Pdu) always works") - })) - .expect("to string always works"), - ); + let reason = serde_json::to_value(reason).expect("Failed to preserialize reason"); - self.content = to_raw_value(&content).expect("to string always works"); + let redacted_because = json!({ + "redacted_because": reason, + }); + + self.unsigned = to_raw_value(&redacted_because) + .expect("Failed to serialize unsigned") + .into(); + + self.content = to_raw_value(&content).expect("Failed to serialize content"); Ok(()) } - -#[implement(super::Pdu)] -#[must_use] -pub fn is_redacted(&self) -> bool { - let Some(unsigned) = &self.unsigned else { - return false; - }; - - let Ok(unsigned) = ExtractRedactedBecause::deserialize(&**unsigned) else { - return false; - }; - - unsigned.redacted_because.is_some() -} - -/// Copies the `redacts` property of the event to the `content` dict and -/// vice-versa. -/// -/// This follows the specification's -/// [recommendation](https://spec.matrix.org/v1.10/rooms/v11/#moving-the-redacts-property-of-mroomredaction-events-to-a-content-property): -/// -/// > For backwards-compatibility with older clients, servers should add a -/// > redacts -/// > property to the top level of m.room.redaction events in when serving -/// > such events -/// > over the Client-Server API. -/// -/// > For improved compatibility with newer clients, servers should add a -/// > redacts property -/// > to the content of m.room.redaction events in older room versions when -/// > serving -/// > such events over the Client-Server API. 
-#[implement(super::Pdu)] -#[must_use] -pub fn copy_redacts(&self) -> (Option, Box) { - if self.kind == TimelineEventType::RoomRedaction { - if let Ok(mut content) = - serde_json::from_str::(self.content.get()) - { - match content.redacts { - | Some(redacts) => { - return (Some(redacts), self.content.clone()); - }, - | _ => match self.redacts.clone() { - | Some(redacts) => { - content.redacts = Some(redacts); - return ( - self.redacts.clone(), - to_raw_value(&content) - .expect("Must be valid, we only added redacts field"), - ); - }, - | _ => {}, - }, - } - } - } - - (self.redacts.clone(), self.content.clone()) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn redacts_id(&self, room_version: &RoomVersionId) -> Option { - use RoomVersionId::*; - - if self.kind != TimelineEventType::RoomRedaction { - return None; - } - - match *room_version { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => self.redacts.clone(), - | _ => - self.get_content::() - .ok()? - .redacts, - } -} diff --git a/src/core/matrix/pdu/strip.rs b/src/core/matrix/pdu/strip.rs deleted file mode 100644 index a39e7d35..00000000 --- a/src/core/matrix/pdu/strip.rs +++ /dev/null @@ -1,257 +0,0 @@ -use ruma::{ - events::{ - AnyMessageLikeEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncStateEvent, - AnySyncTimelineEvent, AnyTimelineEvent, StateEvent, room::member::RoomMemberEventContent, - space::child::HierarchySpaceChildEvent, - }, - serde::Raw, -}; -use serde_json::{json, value::Value as JsonValue}; - -use crate::implement; - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_room_event(self) -> Raw { self.to_room_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_room_event(&self) -> Raw { - let value = self.to_room_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_room_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json 
= json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_message_like_event(self) -> Raw { self.to_message_like_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_message_like_event(&self) -> Raw { - let value = self.to_message_like_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_message_like_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_sync_room_event(self) -> Raw { self.to_sync_room_event() } - -#[implement(super::Pdu)] -#[must_use] -pub fn to_sync_room_event(&self) -> Raw { - let value = self.to_sync_room_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn to_sync_room_event_value(&self) -> JsonValue { - let (redacts, content) = self.copy_redacts(); - let mut json = json!({ - "content": content, - "type": self.kind, - "event_id": self.event_id, 
- "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - if let Some(state_key) = &self.state_key { - json["state_key"] = json!(state_key); - } - if let Some(redacts) = &redacts { - json["redacts"] = json!(redacts); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_state_event(self) -> Raw { - let value = self.into_state_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_state_event_value(self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_sync_state_event(self) -> Raw { - let value = self.into_sync_state_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_sync_state_event_value(self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "state_key": self.state_key, - }); - - if let Some(unsigned) = &self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_stripped_state_event(self) -> Raw { - self.to_stripped_state_event() -} - -#[implement(super::Pdu)] -#[must_use] -pub fn to_stripped_state_event(&self) -> Raw { - let value = self.to_stripped_state_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] 
-#[must_use] -#[inline] -pub fn to_stripped_state_event_value(&self) -> JsonValue { - json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - }) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_stripped_spacechild_state_event(self) -> Raw { - let value = self.into_stripped_spacechild_state_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_stripped_spacechild_state_event_value(self) -> JsonValue { - json!({ - "content": self.content, - "type": self.kind, - "sender": self.sender, - "state_key": self.state_key, - "origin_server_ts": self.origin_server_ts, - }) -} - -#[implement(super::Pdu)] -#[must_use] -pub fn into_member_event(self) -> Raw> { - let value = self.into_member_event_value(); - serde_json::from_value(value).expect("Failed to serialize Event value") -} - -#[implement(super::Pdu)] -#[must_use] -#[inline] -pub fn into_member_event_value(self) -> JsonValue { - let mut json = json!({ - "content": self.content, - "type": self.kind, - "event_id": self.event_id, - "sender": self.sender, - "origin_server_ts": self.origin_server_ts, - "redacts": self.redacts, - "room_id": self.room_id, - "state_key": self.state_key, - }); - - if let Some(unsigned) = self.unsigned { - json["unsigned"] = json!(unsigned); - } - - json -} diff --git a/src/core/matrix/pdu/state_key.rs b/src/core/matrix/state_key.rs similarity index 67% rename from src/core/matrix/pdu/state_key.rs rename to src/core/matrix/state_key.rs index 4af4fcf7..06d614f8 100644 --- a/src/core/matrix/pdu/state_key.rs +++ b/src/core/matrix/state_key.rs @@ -1,8 +1,5 @@ use smallstr::SmallString; -use super::ShortId; - pub type StateKey = SmallString<[u8; INLINE_SIZE]>; -pub type ShortStateKey = ShortId; const INLINE_SIZE: usize = 48; diff --git a/src/core/matrix/state_res/benches.rs b/src/core/matrix/state_res/benches.rs index 12eeab9d..69088369 
100644 --- a/src/core/matrix/state_res/benches.rs +++ b/src/core/matrix/state_res/benches.rs @@ -13,7 +13,6 @@ use ruma::{ EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, Signatures, UserId, events::{ StateEventType, TimelineEventType, - pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, @@ -26,8 +25,10 @@ use serde_json::{ value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, }; -use self::event::PduEvent; -use crate::state_res::{self as state_res, Error, Event, Result, StateMap}; +use crate::{ + matrix::{Event, Pdu, pdu::EventHash}, + state_res::{self as state_res, Error, Result, StateMap}, +}; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); @@ -60,7 +61,7 @@ fn resolution_shallow_auth_chain(c: &mut test::Bencher) { c.iter(|| async { let ev_map = store.0.clone(); let state_sets = [&state_at_bob, &state_at_charlie]; - let fetch = |id: OwnedEventId| ready(ev_map.get(&id).clone()); + let fetch = |id: OwnedEventId| ready(ev_map.get(&id).map(ToOwned::to_owned)); let exists = |id: OwnedEventId| ready(ev_map.get(&id).is_some()); let auth_chain_sets: Vec> = state_sets .iter() @@ -142,7 +143,7 @@ fn resolve_deeper_event_set(c: &mut test::Bencher) { }) .collect(); - let fetch = |id: OwnedEventId| ready(inner.get(&id).clone()); + let fetch = |id: OwnedEventId| ready(inner.get(&id).map(ToOwned::to_owned)); let exists = |id: OwnedEventId| ready(inner.get(&id).is_some()); let _ = match state_res::resolve( &RoomVersionId::V6, @@ -246,7 +247,7 @@ impl TestStore { } } -impl TestStore { +impl TestStore { #[allow(clippy::type_complexity)] fn set_up( &mut self, @@ -380,7 +381,7 @@ fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> PduEvent +) -> Pdu where S: AsRef, { @@ -403,30 +404,28 @@ where .map(event_id) .collect::>(); - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { + Pdu { event_id: 
id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: btreemap! {}, - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new(String::new()), - signatures: Signatures::new(), - }), + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: ts.try_into().unwrap(), + state_key: state_key.map(Into::into), + kind: ev_type, + content, + origin: None, + redacts: None, + unsigned: None, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash { sha256: String::new() }, + signatures: None, } } // all graphs start with these input events #[allow(non_snake_case)] -fn INITIAL_EVENTS() -> HashMap { +fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -508,7 +507,7 @@ fn INITIAL_EVENTS() -> HashMap { // all graphs start with these input events #[allow(non_snake_case)] -fn BAN_STATE_SET() -> HashMap { +fn BAN_STATE_SET() -> HashMap { vec![ to_pdu_event( "PA", @@ -551,119 +550,3 @@ fn BAN_STATE_SET() -> HashMap { .map(|ev| (ev.event_id().to_owned(), ev)) .collect() } - -/// Convenience trait for adding event type plus state key to state maps. 
-trait EventTypeExt { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String); -} - -impl EventTypeExt for &TimelineEventType { - fn with_state_key(self, state_key: impl Into) -> (StateEventType, String) { - (self.to_string().into(), state_key.into()) - } -} - -mod event { - use ruma::{ - EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, - events::{TimelineEventType, pdu::Pdu}, - }; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - use super::Event; - - impl Event for PduEvent { - fn event_id(&self) -> &EventId { &self.event_id } - - fn room_id(&self) -> &RoomId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.room_id, - | Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.sender, - | Pdu::RoomV3Pdu(ev) => &ev.sender, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.kind, - | Pdu::RoomV3Pdu(ev) => &ev.kind, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.content, - | Pdu::RoomV3Pdu(ev) => &ev.content, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - | Pdu::RoomV3Pdu(ev) => 
ev.state_key.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => - Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => - Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&EventId> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), - #[cfg(not(feature = "unstable-exhaustive-types"))] - | _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 759ab5cb..8c760860 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -136,17 +136,17 @@ pub fn auth_types_for_event( event_id = incoming_event.event_id().as_str(), ) )] -pub async fn auth_check( +pub async fn auth_check( room_version: &RoomVersion, - incoming_event: &Incoming, - current_third_party_invite: Option<&Incoming>, + incoming_event: &E, + current_third_party_invite: Option<&E>, fetch_state: F, ) -> Result where F: Fn(&StateEventType, &str) -> Fut + Send, - Fut: Future> + Send, - Fetched: Event + Send, - Incoming: Event + Send + Sync, + Fut: Future> + Send, + E: Event + Send + Sync, + 
for<'a> &'a E: Event + Send, { debug!( event_id = format!("{}", incoming_event.event_id()), @@ -514,20 +514,24 @@ where /// event and the current State. #[allow(clippy::too_many_arguments)] #[allow(clippy::cognitive_complexity)] -fn valid_membership_change( +fn valid_membership_change( room_version: &RoomVersion, target_user: &UserId, - target_user_membership_event: Option<&impl Event>, + target_user_membership_event: Option<&E>, sender: &UserId, - sender_membership_event: Option<&impl Event>, - current_event: impl Event, - current_third_party_invite: Option<&impl Event>, - power_levels_event: Option<&impl Event>, - join_rules_event: Option<&impl Event>, + sender_membership_event: Option<&E>, + current_event: &E, + current_third_party_invite: Option<&E>, + power_levels_event: Option<&E>, + join_rules_event: Option<&E>, user_for_join_auth: Option<&UserId>, user_for_join_auth_membership: &MembershipState, - create_room: &impl Event, -) -> Result { + create_room: &E, +) -> Result +where + E: Event + Send + Sync, + for<'a> &'a E: Event + Send, +{ #[derive(Deserialize)] struct GetThirdPartyInvite { third_party_invite: Option>, @@ -820,7 +824,7 @@ fn valid_membership_change( /// /// Does the event have the correct userId as its state_key if it's not the "" /// state_key. -fn can_send_event(event: impl Event, ple: Option, user_level: Int) -> bool { +fn can_send_event(event: &impl Event, ple: Option<&impl Event>, user_level: Int) -> bool { let event_type_power_level = get_send_level(event.event_type(), event.state_key(), ple); debug!( @@ -846,8 +850,8 @@ fn can_send_event(event: impl Event, ple: Option, user_level: Int) - /// Confirm that the event sender has the required power levels. 
fn check_power_levels( room_version: &RoomVersion, - power_event: impl Event, - previous_power_event: Option, + power_event: &impl Event, + previous_power_event: Option<&impl Event>, user_level: Int, ) -> Option { match power_event.state_key() { @@ -1010,7 +1014,7 @@ fn get_deserialize_levels( /// given event. fn check_redaction( _room_version: &RoomVersion, - redaction_event: impl Event, + redaction_event: &impl Event, user_level: Int, redact_level: Int, ) -> Result { @@ -1039,7 +1043,7 @@ fn check_redaction( fn get_send_level( e_type: &TimelineEventType, state_key: Option<&str>, - power_lvl: Option, + power_lvl: Option<&impl Event>, ) -> Int { power_lvl .and_then(|ple| { @@ -1062,7 +1066,7 @@ fn verify_third_party_invite( target_user: Option<&UserId>, sender: &UserId, tp_id: &ThirdPartyInvite, - current_third_party_invite: Option, + current_third_party_invite: Option<&impl Event>, ) -> bool { // 1. Check for user being banned happens before this is called // checking for mxid and token keys is done by ruma when deserializing @@ -1128,12 +1132,15 @@ mod tests { }; use serde_json::value::to_raw_value as to_raw_json_value; - use crate::state_res::{ - Event, EventTypeExt, RoomVersion, StateMap, - event_auth::valid_membership_change, - test_utils::{ - INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, PduEvent, alice, charlie, ella, event_id, - member_content_ban, member_content_join, room_id, to_pdu_event, + use crate::{ + matrix::{Event, EventTypeExt, Pdu as PduEvent}, + state_res::{ + RoomVersion, StateMap, + event_auth::valid_membership_change, + test_utils::{ + INITIAL_EVENTS, INITIAL_EVENTS_CREATE_ROOM, alice, charlie, ella, event_id, + member_content_ban, member_content_join, room_id, to_pdu_event, + }, }, }; diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 651f6130..ed5aa034 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -37,7 +37,7 @@ pub use self::{ }; use crate::{ debug, debug_error, - 
matrix::{event::Event, pdu::StateKey}, + matrix::{Event, StateKey}, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, WidebandExt}, warn, @@ -90,7 +90,7 @@ where SetIter: Iterator> + Clone + Send, Hasher: BuildHasher + Send + Sync, E: Event + Clone + Send + Sync, - for<'b> &'b E: Send, + for<'b> &'b E: Event + Send, { debug!("State resolution starting"); @@ -522,6 +522,7 @@ where Fut: Future> + Send, S: Stream + Send + 'a, E: Event + Clone + Send + Sync, + for<'b> &'b E: Event + Send, { debug!("starting iterative auth check"); @@ -552,7 +553,7 @@ where let auth_events = &auth_events; let mut resolved_state = unconflicted_state; - for event in &events_to_check { + for event in events_to_check { let state_key = event .state_key() .ok_or_else(|| Error::InvalidPdu("State event had no state key".to_owned()))?; @@ -607,11 +608,15 @@ where }); let fetch_state = |ty: &StateEventType, key: &str| { - future::ready(auth_state.get(&ty.with_state_key(key))) + future::ready( + auth_state + .get(&ty.with_state_key(key)) + .map(ToOwned::to_owned), + ) }; let auth_result = - auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; + auth_check(room_version, &event, current_third_party, fetch_state).await; match auth_result { | Ok(true) => { @@ -794,11 +799,11 @@ where } } -fn is_type_and_key(ev: impl Event, ev_type: &TimelineEventType, state_key: &str) -> bool { +fn is_type_and_key(ev: &impl Event, ev_type: &TimelineEventType, state_key: &str) -> bool { ev.event_type() == ev_type && ev.state_key() == Some(state_key) } -fn is_power_event(event: impl Event) -> bool { +fn is_power_event(event: &impl Event) -> bool { match event.event_type() { | TimelineEventType::RoomPowerLevels | TimelineEventType::RoomJoinRules @@ -859,15 +864,19 @@ mod tests { use serde_json::{json, value::to_raw_value as to_raw_json_value}; use super::{ - Event, EventTypeExt, StateMap, is_power_event, + StateMap, is_power_event, room_version::RoomVersion, 
test_utils::{ - INITIAL_EVENTS, PduEvent, TestStore, alice, bob, charlie, do_check, ella, event_id, + INITIAL_EVENTS, TestStore, alice, bob, charlie, do_check, ella, event_id, member_content_ban, member_content_join, room_id, to_init_pdu_event, to_pdu_event, zara, }, }; - use crate::{debug, utils::stream::IterStream}; + use crate::{ + debug, + matrix::{Event, EventTypeExt, Pdu as PduEvent}, + utils::stream::IterStream, + }; async fn test_event_sort() { use futures::future::ready; diff --git a/src/core/matrix/state_res/test_utils.rs b/src/core/matrix/state_res/test_utils.rs index c6945f66..9f24c51b 100644 --- a/src/core/matrix/state_res/test_utils.rs +++ b/src/core/matrix/state_res/test_utils.rs @@ -10,7 +10,6 @@ use ruma::{ UserId, event_id, events::{ TimelineEventType, - pdu::{EventHash, Pdu, RoomV3Pdu}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, @@ -23,17 +22,16 @@ use serde_json::{ value::{RawValue as RawJsonValue, to_raw_value as to_raw_json_value}, }; -pub(crate) use self::event::PduEvent; use super::auth_types_for_event; use crate::{ Result, info, - matrix::{Event, EventTypeExt, StateMap}, + matrix::{Event, EventTypeExt, Pdu, StateMap, pdu::EventHash}, }; static SERVER_TIMESTAMP: AtomicU64 = AtomicU64::new(0); pub(crate) async fn do_check( - events: &[PduEvent], + events: &[Pdu], edges: Vec>, expected_state_ids: Vec, ) { @@ -81,8 +79,8 @@ pub(crate) async fn do_check( } } - // event_id -> PduEvent - let mut event_map: HashMap = HashMap::new(); + // event_id -> Pdu + let mut event_map: HashMap = HashMap::new(); // event_id -> StateMap let mut state_at_event: HashMap> = HashMap::new(); @@ -265,7 +263,7 @@ impl TestStore { // A StateStore implementation for testing #[allow(clippy::type_complexity)] -impl TestStore { +impl TestStore { pub(crate) fn set_up( &mut self, ) -> (StateMap, StateMap, StateMap) { @@ -390,7 +388,7 @@ pub(crate) fn to_init_pdu_event( ev_type: TimelineEventType, state_key: 
Option<&str>, content: Box, -) -> PduEvent { +) -> Pdu { let ts = SERVER_TIMESTAMP.fetch_add(1, SeqCst); let id = if id.contains('$') { id.to_owned() @@ -398,24 +396,22 @@ pub(crate) fn to_init_pdu_event( format!("${id}:foo") }; - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { + Pdu { event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: BTreeMap::new(), - auth_events: vec![], - prev_events: vec![], - depth: uint!(0), - hashes: EventHash::new("".to_owned()), - signatures: ServerSignatures::default(), - }), + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: ts.try_into().unwrap(), + state_key: state_key.map(Into::into), + kind: ev_type, + content, + origin: None, + redacts: None, + unsigned: None, + auth_events: vec![], + prev_events: vec![], + depth: uint!(0), + hashes: EventHash { sha256: "".to_owned() }, + signatures: None, } } @@ -427,7 +423,7 @@ pub(crate) fn to_pdu_event( content: Box, auth_events: &[S], prev_events: &[S], -) -> PduEvent +) -> Pdu where S: AsRef, { @@ -448,30 +444,28 @@ where .map(event_id) .collect::>(); - let state_key = state_key.map(ToOwned::to_owned); - PduEvent { + Pdu { event_id: id.try_into().unwrap(), - rest: Pdu::RoomV3Pdu(RoomV3Pdu { - room_id: room_id().to_owned(), - sender: sender.to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch(ts.try_into().unwrap()), - state_key, - kind: ev_type, - content, - redacts: None, - unsigned: BTreeMap::new(), - auth_events, - prev_events, - depth: uint!(0), - hashes: EventHash::new("".to_owned()), - signatures: ServerSignatures::default(), - }), + room_id: room_id().to_owned(), + sender: sender.to_owned(), + origin_server_ts: ts.try_into().unwrap(), + state_key: state_key.map(Into::into), + kind: ev_type, + content, + origin: 
None, + redacts: None, + unsigned: None, + auth_events, + prev_events, + depth: uint!(0), + hashes: EventHash { sha256: "".to_owned() }, + signatures: None, } } // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS() -> HashMap { +pub(crate) fn INITIAL_EVENTS() -> HashMap { vec![ to_pdu_event::<&EventId>( "CREATE", @@ -553,7 +547,7 @@ pub(crate) fn INITIAL_EVENTS() -> HashMap { // all graphs start with these input events #[allow(non_snake_case)] -pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { +pub(crate) fn INITIAL_EVENTS_CREATE_ROOM() -> HashMap { vec![to_pdu_event::<&EventId>( "CREATE", alice(), @@ -575,111 +569,3 @@ pub(crate) fn INITIAL_EDGES() -> Vec { .map(event_id) .collect::>() } - -pub(crate) mod event { - use ruma::{ - EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, UserId, - events::{TimelineEventType, pdu::Pdu}, - }; - use serde::{Deserialize, Serialize}; - use serde_json::value::RawValue as RawJsonValue; - - use crate::Event; - - impl Event for PduEvent { - fn event_id(&self) -> &EventId { &self.event_id } - - fn room_id(&self) -> &RoomId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.room_id, - | Pdu::RoomV3Pdu(ev) => &ev.room_id, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn sender(&self) -> &UserId { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.sender, - | Pdu::RoomV3Pdu(ev) => &ev.sender, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn event_type(&self) -> &TimelineEventType { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.kind, - | Pdu::RoomV3Pdu(ev) => &ev.kind, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn content(&self) -> &RawJsonValue { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => &ev.content, - | Pdu::RoomV3Pdu(ev) => &ev.content, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn 
origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.origin_server_ts, - | Pdu::RoomV3Pdu(ev) => ev.origin_server_ts, - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn state_key(&self) -> Option<&str> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.state_key.as_deref(), - | Pdu::RoomV3Pdu(ev) => ev.state_key.as_deref(), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - #[allow(refining_impl_trait)] - fn prev_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => - Box::new(ev.prev_events.iter().map(|(id, _)| id.as_ref())), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.prev_events.iter().map(AsRef::as_ref)), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - #[allow(refining_impl_trait)] - fn auth_events(&self) -> Box + Send + '_> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => - Box::new(ev.auth_events.iter().map(|(id, _)| id.as_ref())), - | Pdu::RoomV3Pdu(ev) => Box::new(ev.auth_events.iter().map(AsRef::as_ref)), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - - fn redacts(&self) -> Option<&EventId> { - match &self.rest { - | Pdu::RoomV1Pdu(ev) => ev.redacts.as_deref(), - | Pdu::RoomV3Pdu(ev) => ev.redacts.as_deref(), - #[allow(unreachable_patterns)] - | _ => unreachable!("new PDU version"), - } - } - } - - #[derive(Clone, Debug, Deserialize, Serialize)] - #[allow(clippy::exhaustive_structs)] - pub(crate) struct PduEvent { - pub(crate) event_id: OwnedEventId, - #[serde(flatten)] - pub(crate) rest: Pdu, - } -} diff --git a/src/core/mod.rs b/src/core/mod.rs index aaacd4d8..d99139be 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -25,7 +25,9 @@ pub use info::{ rustc_flags_capture, version, version::{name, version}, }; -pub use matrix::{Event, EventTypeExt, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res}; +pub use matrix::{ + 
Event, EventTypeExt, Pdu, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res, +}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index a76c3ef6..66c373ec 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -9,8 +9,8 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{ - Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, +use conduwuit_core::{ + Error, Event, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; use futures::{Future, FutureExt, TryFutureExt}; @@ -361,7 +361,10 @@ impl Service { Ok(()) } - pub async fn is_admin_command(&self, pdu: &PduEvent, body: &str) -> bool { + pub async fn is_admin_command(&self, event: &E, body: &str) -> bool + where + E: Event + Send + Sync, + { // Server-side command-escape with public echo let is_escape = body.starts_with('\\'); let is_public_escape = is_escape && body.trim_start_matches('\\').starts_with("!admin"); @@ -376,8 +379,10 @@ impl Service { return false; } + let user_is_local = self.services.globals.user_is_local(event.sender()); + // only allow public escaped commands by local admins - if is_public_escape && !self.services.globals.user_is_local(&pdu.sender) { + if is_public_escape && !user_is_local { return false; } @@ -387,20 +392,20 @@ impl Service { } // Prevent unescaped !admin from being used outside of the admin room - if is_public_prefix && !self.is_admin_room(&pdu.room_id).await { + if is_public_prefix && !self.is_admin_room(event.room_id()).await { return false; } // Only senders who are admin can proceed - if !self.user_is_admin(&pdu.sender).await { + if !self.user_is_admin(event.sender()).await { return false; } // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as the server user let 
emergency_password_set = self.services.server.config.emergency_password.is_some(); - let from_server = pdu.sender == *server_user && !emergency_password_set; - if from_server && self.is_admin_room(&pdu.room_id).await { + let from_server = event.sender() == server_user && !emergency_password_set; + if from_server && self.is_admin_room(event.room_id()).await { return false; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 27490fb8..192ef447 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,12 +1,12 @@ use std::{fmt::Debug, mem, sync::Arc}; use bytes::BytesMut; -use conduwuit::{ - Err, PduEvent, Result, debug_warn, err, trace, +use conduwuit_core::{ + Err, Event, Result, debug_warn, err, trace, utils::{stream::TryIgnore, string_from_bytes}, warn, }; -use database::{Deserialized, Ignore, Interfix, Json, Map}; +use conduwuit_database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt}; use ipaddress::IPAddress; use ruma::{ @@ -272,22 +272,26 @@ impl Service { } } - #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] - pub async fn send_push_notice( + #[tracing::instrument(skip(self, user, unread, pusher, ruleset, event))] + pub async fn send_push_notice( &self, user: &UserId, unread: UInt, pusher: &Pusher, ruleset: Ruleset, - pdu: &PduEvent, - ) -> Result<()> { + event: &E, + ) -> Result + where + E: Event + Send + Sync, + for<'a> &'a E: Event + Send, + { let mut notify = None; let mut tweaks = Vec::new(); let power_levels: RoomPowerLevelsEventContent = self .services .state_accessor - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "") + .room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "") .await .and_then(|ev| { serde_json::from_str(ev.content.get()).map_err(|e| { @@ -296,8 +300,9 @@ impl Service { }) .unwrap_or_default(); + let serialized = event.to_format(); for action in self - .get_actions(user, &ruleset, &power_levels, 
&pdu.to_sync_room_event(), &pdu.room_id) + .get_actions(user, &ruleset, &power_levels, &serialized, event.room_id()) .await { let n = match action { @@ -319,7 +324,7 @@ impl Service { } if notify == Some(true) { - self.send_notice(unread, pusher, tweaks, pdu).await?; + self.send_notice(unread, pusher, tweaks, event).await?; } // Else the event triggered no actions @@ -369,13 +374,16 @@ impl Service { } #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] - async fn send_notice( + async fn send_notice( &self, unread: UInt, pusher: &Pusher, tweaks: Vec, - event: &PduEvent, - ) -> Result { + event: &E, + ) -> Result + where + E: Event + Send + Sync, + { // TODO: email match &pusher.kind { | PusherKind::Http(http) => { @@ -421,8 +429,8 @@ impl Service { let d = vec![device]; let mut notifi = Notification::new(d); - notifi.event_id = Some((*event.event_id).to_owned()); - notifi.room_id = Some((*event.room_id).to_owned()); + notifi.event_id = Some(event.event_id().to_owned()); + notifi.room_id = Some(event.room_id().to_owned()); if http .data .get("org.matrix.msc4076.disable_badge_count") @@ -442,7 +450,7 @@ impl Service { ) .await?; } else { - if event.kind == TimelineEventType::RoomEncrypted + if *event.kind() == TimelineEventType::RoomEncrypted || tweaks .iter() .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) @@ -451,29 +459,29 @@ impl Service { } else { notifi.prio = NotificationPriority::Low; } - notifi.sender = Some(event.sender.clone()); - notifi.event_type = Some(event.kind.clone()); - notifi.content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.sender = Some(event.sender().to_owned()); + notifi.event_type = Some(event.kind().to_owned()); + notifi.content = serde_json::value::to_raw_value(event.content()).ok(); - if event.kind == TimelineEventType::RoomMember { + if *event.kind() == TimelineEventType::RoomMember { notifi.user_is_target = - event.state_key.as_deref() == Some(event.sender.as_str()); + 
event.state_key() == Some(event.sender().as_str()); } notifi.sender_display_name = - self.services.users.displayname(&event.sender).await.ok(); + self.services.users.displayname(event.sender()).await.ok(); notifi.room_name = self .services .state_accessor - .get_name(&event.room_id) + .get_name(event.room_id()) .await .ok(); notifi.room_alias = self .services .state_accessor - .get_canonical_alias(&event.room_id) + .get_canonical_alias(event.room_id()) .await .ok(); diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index feebf8c1..5cc6be55 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -126,7 +126,7 @@ pub(super) async fn handle_outlier_pdu<'a>( let state_fetch = |ty: &StateEventType, sk: &str| { let key = (ty.to_owned(), sk.into()); - ready(auth_events.get(&key)) + ready(auth_events.get(&key).map(ToOwned::to_owned)) }; let auth_check = state_res::event_auth::auth_check( diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 97d3df97..00b18c06 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -2,7 +2,7 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::In use conduwuit::{ Err, Result, debug, debug_info, err, implement, - matrix::{EventTypeExt, PduEvent, StateKey, state_res}, + matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, @@ -108,7 +108,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); - ready(auth_events.get(&key).cloned()) + ready(auth_events.get(&key).map(ToOwned::to_owned)) }; let auth_check = state_res::event_auth::auth_check( diff --git 
a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 4100dd75..b9d067a6 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use conduwuit::{ - PduCount, PduEvent, Result, +use conduwuit_core::{ + Event, PduCount, PduEvent, Result, arrayvec::ArrayVec, implement, utils::{ diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs index 53d2b742..de2647ca 100644 --- a/src/service/rooms/spaces/mod.rs +++ b/src/service/rooms/spaces/mod.rs @@ -5,8 +5,8 @@ mod tests; use std::{fmt::Write, sync::Arc}; use async_trait::async_trait; -use conduwuit::{ - Err, Error, PduEvent, Result, implement, +use conduwuit_core::{ + Err, Error, Event, PduEvent, Result, implement, utils::{ IterStream, future::{BoolExt, TryExtExt}, @@ -142,7 +142,7 @@ pub async fn get_summary_and_children_local( let children_pdus: Vec<_> = self .get_space_child_events(current_room) - .map(PduEvent::into_stripped_spacechild_state_event) + .map(Event::into_format) .collect() .await; @@ -511,7 +511,7 @@ async fn cache_insert( room_id: room_id.clone(), children_state: self .get_space_child_events(&room_id) - .map(PduEvent::into_stripped_spacechild_state_event) + .map(Event::into_format) .collect() .await, encryption, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 803ba9d7..9eb02221 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, fmt::Write, iter::once, sync::Arc}; use async_trait::async_trait; -use conduwuit::{ - PduEvent, Result, err, +use conduwuit_core::{ + Event, PduEvent, Result, err, result::FlatOk, state_res::{self, StateMap}, utils::{ @@ -11,7 +11,7 @@ use conduwuit::{ }, warn, }; -use database::{Deserialized, Ignore, Interfix, Map}; +use conduwuit_database::{Deserialized, Ignore, Interfix, Map}; use futures::{ FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt, 
future::join_all, pin_mut, }; @@ -319,30 +319,34 @@ impl Service { } #[tracing::instrument(skip_all, level = "debug")] - pub async fn summary_stripped(&self, event: &PduEvent) -> Vec> { + pub async fn summary_stripped<'a, E>(&self, event: &'a E) -> Vec> + where + E: Event + Send + Sync, + &'a E: Event + Send, + { let cells = [ (&StateEventType::RoomCreate, ""), (&StateEventType::RoomJoinRules, ""), (&StateEventType::RoomCanonicalAlias, ""), (&StateEventType::RoomName, ""), (&StateEventType::RoomAvatar, ""), - (&StateEventType::RoomMember, event.sender.as_str()), // Add recommended events + (&StateEventType::RoomMember, event.sender().as_str()), // Add recommended events (&StateEventType::RoomEncryption, ""), (&StateEventType::RoomTopic, ""), ]; - let fetches = cells.iter().map(|(event_type, state_key)| { + let fetches = cells.into_iter().map(|(event_type, state_key)| { self.services .state_accessor - .room_state_get(&event.room_id, event_type, state_key) + .room_state_get(event.room_id(), event_type, state_key) }); join_all(fetches) .await .into_iter() .filter_map(Result::ok) - .map(PduEvent::into_stripped_state_event) - .chain(once(event.to_stripped_state_event())) + .map(Event::into_format) + .chain(once(event.to_format())) .collect() } diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index a680df55..9566eb61 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{ - Result, err, +use conduwuit_core::{ + Event, Result, err, matrix::pdu::{PduCount, PduEvent, PduId, RawPduId}, utils::{ ReadyExt, @@ -49,7 +49,11 @@ impl crate::Service for Service { } impl Service { - pub async fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> { + pub async fn add_to_thread<'a, E>(&self, root_event_id: &EventId, event: &'a E) -> Result + where + E: Event + Send + Sync, + &'a E: Event + Send, + { let root_id = self 
.services .timeline @@ -86,7 +90,7 @@ impl Service { }) { // Thread already existed relations.count = relations.count.saturating_add(uint!(1)); - relations.latest_event = pdu.to_message_like_event(); + relations.latest_event = event.to_format(); let content = serde_json::to_value(relations).expect("to_value always works"); @@ -99,7 +103,7 @@ impl Service { } else { // New thread let relations = BundledThread { - latest_event: pdu.to_message_like_event(), + latest_event: event.to_format(), count: uint!(1), current_user_participated: true, }; @@ -129,7 +133,7 @@ impl Service { users.push(root_pdu.sender); }, } - users.push(pdu.sender.clone()); + users.push(event.sender().to_owned()); self.update_participants(&root_id, &users) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 534d8faf..bcad1309 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -375,8 +375,6 @@ impl Service { .await .unwrap_or_default(); - let sync_pdu = pdu.to_sync_room_event(); - let mut push_target: HashSet<_> = self .services .state_cache @@ -401,6 +399,7 @@ impl Service { } } + let serialized = pdu.to_format(); for user in &push_target { let rules_for_user = self .services @@ -418,7 +417,7 @@ impl Service { for action in self .services .pusher - .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id) + .get_actions(user, &rules_for_user, &power_levels, &serialized, &pdu.room_id) .await { match action { @@ -768,7 +767,7 @@ impl Service { let auth_fetch = |k: &StateEventType, s: &str| { let key = (k.clone(), s.into()); - ready(auth_events.get(&key)) + ready(auth_events.get(&key).map(ToOwned::to_owned)) }; let auth_check = state_res::auth_check( diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index fab02f6b..408ab17d 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -9,8 +9,8 @@ use std::{ }; use base64::{Engine as _, 
engine::general_purpose::URL_SAFE_NO_PAD}; -use conduwuit::{ - Error, Result, debug, err, error, +use conduwuit_core::{ + Error, Event, Result, debug, err, error, result::LogErr, trace, utils::{ @@ -697,7 +697,7 @@ impl Service { match event { | SendingEvent::Pdu(pdu_id) => { if let Ok(pdu) = self.services.timeline.get_pdu_from_id(pdu_id).await { - pdu_jsons.push(pdu.into_room_event()); + pdu_jsons.push(pdu.to_format()); } }, | SendingEvent::Edu(edu) => From af4f66c768c8edc1c0da66f583ddb4c17201c4f0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Apr 2025 00:58:56 +0000 Subject: [PATCH 078/270] Cleanup/improve other async queries in some client handlers. Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 4 +- src/admin/processor.rs | 3 +- src/admin/user/commands.rs | 5 +- src/api/client/membership.rs | 2771 -------------------------- src/api/client/membership/ban.rs | 60 + src/api/client/membership/forget.rs | 52 + src/api/client/membership/invite.rs | 238 +++ src/api/client/membership/join.rs | 988 +++++++++ src/api/client/membership/kick.rs | 65 + src/api/client/membership/knock.rs | 767 +++++++ src/api/client/membership/leave.rs | 386 ++++ src/api/client/membership/members.rs | 147 ++ src/api/client/membership/mod.rs | 156 ++ src/api/client/membership/unban.rs | 58 + src/api/client/profile.rs | 54 +- src/api/client/room/initial_sync.rs | 41 +- 16 files changed, 2977 insertions(+), 2818 deletions(-) delete mode 100644 src/api/client/membership.rs create mode 100644 src/api/client/membership/ban.rs create mode 100644 src/api/client/membership/forget.rs create mode 100644 src/api/client/membership/invite.rs create mode 100644 src/api/client/membership/join.rs create mode 100644 src/api/client/membership/kick.rs create mode 100644 src/api/client/membership/knock.rs create mode 100644 src/api/client/membership/leave.rs create mode 100644 src/api/client/membership/members.rs create mode 100644 src/api/client/membership/mod.rs create mode 100644 
src/api/client/membership/unban.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 2323e3b8..74355311 100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -412,7 +412,9 @@ pub(super) async fn change_log_level(&self, filter: Option, reset: bool) .reload .reload(&new_filter_layer, Some(handles)) { - | Ok(()) => return self.write_str("Successfully changed log level").await, + | Ok(()) => { + return self.write_str("Successfully changed log level").await; + }, | Err(e) => { return Err!("Failed to modify and reload the global tracing log level: {e}"); }, diff --git a/src/admin/processor.rs b/src/admin/processor.rs index 8d1fe89c..e80000c1 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -94,8 +94,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce #[allow(clippy::result_large_err)] fn handle_panic(error: &Error, command: &CommandInput) -> ProcessorResult { - let link = - "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 🥺"; + let link = "Please submit a [bug report](https://forgejo.ellis.link/continuwuation/continuwuity/issues/new). 
🥺"; let msg = format!("Panic occurred while processing command:\n```\n{error:#?}\n```\n{link}"); let content = RoomMessageEventContent::notice_markdown(msg); error!("Panic while processing command: {error:?}"); diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 3750d758..e15c0b2c 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -286,8 +286,9 @@ pub(super) async fn reset_password(&self, username: String, password: Option return Err!("Couldn't reset the password for user {user_id}: {e}"), - | Ok(()) => - write!(self, "Successfully reset the password for user {user_id}: `{new_password}`"), + | Ok(()) => { + write!(self, "Successfully reset the password for user {user_id}: `{new_password}`") + }, } .await } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs deleted file mode 100644 index 3c2a6fe3..00000000 --- a/src/api/client/membership.rs +++ /dev/null @@ -1,2771 +0,0 @@ -use std::{ - borrow::Borrow, - collections::{HashMap, HashSet}, - iter::once, - net::IpAddr, - sync::Arc, -}; - -use axum::extract::State; -use axum_client_ip::InsecureClientIp; -use conduwuit::{ - Err, Event, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, - is_matching, - matrix::{ - StateKey, - pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, - state_res, - }, - result::{FlatOk, NotFound}, - trace, - utils::{ - self, FutureBoolExt, - future::ReadyEqExt, - shuffle, - stream::{BroadbandExt, IterStream, ReadyExt}, - }, - warn, -}; -use conduwuit_service::{ - Services, - appservice::RegistrationInfo, - rooms::{ - state::RoomMutexGuard, - state_compressor::{CompressedState, HashSetCompressStateEvent}, - }, -}; -use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; -use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, - OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, - api::{ - client::{ - error::ErrorKind, - 
knock::knock_room, - membership::{ - ThirdPartySigned, ban_user, forget_room, - get_member_events::{self, v3::MembershipEventFilter}, - invite_user, join_room_by_id, join_room_by_id_or_alias, - joined_members::{self, v3::RoomMember}, - joined_rooms, kick_user, leave_room, unban_user, - }, - }, - federation::{self, membership::create_invite}, - }, - canonical_json::to_canonical_value, - events::{ - StateEventType, - room::{ - join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - }, - }, -}; - -use crate::{Ruma, client::full_user_deactivate}; - -/// Checks if the room is banned in any way possible and the sender user is not -/// an admin. -/// -/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is -/// enabled -#[tracing::instrument(skip(services))] -async fn banned_room_check( - services: &Services, - user_id: &UserId, - room_id: Option<&RoomId>, - server_name: Option<&ServerName>, - client_ip: IpAddr, -) -> Result { - if services.users.is_admin(user_id).await { - return Ok(()); - } - - if let Some(room_id) = room_id { - if services.rooms.metadata.is_banned(room_id).await - || services - .moderation - .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) - { - warn!( - "User {user_id} who is not an admin attempted to send an invite for or \ - attempted to join a banned room or banned room server name: {room_id}" - ); - - if services.server.config.auto_deactivate_banned_room_attempts { - warn!( - "Automatically deactivating user {user_id} due to attempted banned room join" - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Automatically deactivating user {user_id} due to attempted banned \ - room join from IP {client_ip}" - )) - .await; - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, 
user_id, &all_joined_rooms) - .boxed() - .await?; - } - - return Err!(Request(Forbidden("This room is banned on this homeserver."))); - } - } else if let Some(server_name) = server_name { - if services - .config - .forbidden_remote_server_names - .is_match(server_name.host()) - { - warn!( - "User {user_id} who is not an admin tried joining a room which has the server \ - name {server_name} that is globally forbidden. Rejecting.", - ); - - if services.server.config.auto_deactivate_banned_room_attempts { - warn!( - "Automatically deactivating user {user_id} due to attempted banned room join" - ); - - if services.server.config.admin_room_notices { - services - .admin - .send_text(&format!( - "Automatically deactivating user {user_id} due to attempted banned \ - room join from IP {client_ip}" - )) - .await; - } - - let all_joined_rooms: Vec = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(Into::into) - .collect() - .await; - - full_user_deactivate(services, user_id, &all_joined_rooms) - .boxed() - .await?; - } - - return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); - } - } - - Ok(()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/join` -/// -/// Tries to join the sender user into a room. -/// -/// - If the server knowns about this room: creates the join event and does auth -/// rules locally -/// - If the server does not know about the room: asks other servers over -/// federation -#[tracing::instrument(skip_all, fields(%client), name = "join")] -pub(crate) async fn join_room_by_id_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - if services.users.is_suspended(sender_user).await? 
{ - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - - banned_room_check( - &services, - sender_user, - Some(&body.room_id), - body.room_id.server_name(), - client, - ) - .await?; - - // There is no body.server_name for /roomId/join - let mut servers: Vec<_> = services - .rooms - .state_cache - .servers_invite_via(&body.room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &body.room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - - if let Some(server) = body.room_id.server_name() { - servers.push(server.into()); - } - - servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - join_room_by_id_helper( - &services, - sender_user, - &body.room_id, - body.reason.clone(), - &servers, - body.third_party_signed.as_ref(), - &body.appservice_info, - ) - .boxed() - .await -} - -/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` -/// -/// Tries to join the sender user into a room. -/// -/// - If the server knowns about this room: creates the join event and does auth -/// rules locally -/// - If the server does not know about the room: use the server name query -/// param if specified. if not specified, asks other servers over federation -/// via room alias server name and room ID server name -#[tracing::instrument(skip_all, fields(%client), name = "join")] -pub(crate) async fn join_room_by_id_or_alias_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let appservice_info = &body.appservice_info; - let body = &body.body; - if services.users.is_suspended(sender_user).await? 
{ - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { - | Ok(room_id) => { - banned_room_check( - &services, - sender_user, - Some(&room_id), - room_id.server_name(), - client, - ) - .boxed() - .await?; - - let mut servers = body.via.clone(); - servers.extend( - services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - - if let Some(server) = room_id.server_name() { - servers.push(server.to_owned()); - } - - servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - (servers, room_id) - }, - | Err(room_alias) => { - let (room_id, mut servers) = services - .rooms - .alias - .resolve_alias(&room_alias, Some(body.via.clone())) - .await?; - - banned_room_check( - &services, - sender_user, - Some(&room_id), - Some(room_alias.server_name()), - client, - ) - .await?; - - let addl_via_servers = services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned); - - let addl_state_servers = services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default(); - - let mut addl_servers: Vec<_> = addl_state_servers - .iter() - .map(|event| event.get_field("sender")) - .filter_map(FlatOk::flat_ok) - .map(|user: &UserId| user.server_name().to_owned()) - .stream() - .chain(addl_via_servers) - .collect() - .await; - - addl_servers.sort_unstable(); - addl_servers.dedup(); - shuffle(&mut addl_servers); - servers.append(&mut addl_servers); - - (servers, room_id) - }, - }; - - let join_room_response = 
join_room_by_id_helper( - &services, - sender_user, - &room_id, - body.reason.clone(), - &servers, - body.third_party_signed.as_ref(), - appservice_info, - ) - .boxed() - .await?; - - Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) -} - -/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` -/// -/// Tries to knock the room to ask permission to join for the sender user. -#[tracing::instrument(skip_all, fields(%client), name = "knock")] -pub(crate) async fn knock_room_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let body = &body.body; - if services.users.is_suspended(sender_user).await? { - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - - let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { - | Ok(room_id) => { - banned_room_check( - &services, - sender_user, - Some(&room_id), - room_id.server_name(), - client, - ) - .await?; - - let mut servers = body.via.clone(); - servers.extend( - services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned) - .collect::>() - .await, - ); - - servers.extend( - services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default() - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - - if let Some(server) = room_id.server_name() { - servers.push(server.to_owned()); - } - - servers.sort_unstable(); - servers.dedup(); - shuffle(&mut servers); - - (servers, room_id) - }, - | Err(room_alias) => { - let (room_id, mut servers) = services - .rooms - .alias - .resolve_alias(&room_alias, Some(body.via.clone())) - .await?; - - banned_room_check( - &services, - sender_user, - Some(&room_id), - Some(room_alias.server_name()), - client, - 
) - .await?; - - let addl_via_servers = services - .rooms - .state_cache - .servers_invite_via(&room_id) - .map(ToOwned::to_owned); - - let addl_state_servers = services - .rooms - .state_cache - .invite_state(sender_user, &room_id) - .await - .unwrap_or_default(); - - let mut addl_servers: Vec<_> = addl_state_servers - .iter() - .map(|event| event.get_field("sender")) - .filter_map(FlatOk::flat_ok) - .map(|user: &UserId| user.server_name().to_owned()) - .stream() - .chain(addl_via_servers) - .collect() - .await; - - addl_servers.sort_unstable(); - addl_servers.dedup(); - shuffle(&mut addl_servers); - servers.append(&mut addl_servers); - - (servers, room_id) - }, - }; - - knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) - .boxed() - .await -} - -/// # `POST /_matrix/client/v3/rooms/{roomId}/leave` -/// -/// Tries to leave the sender user from a room. -/// -/// - This should always work if the user is currently joined. -pub(crate) async fn leave_room_route( - State(services): State, - body: Ruma, -) -> Result { - leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) - .boxed() - .await - .map(|()| leave_room::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` -/// -/// Tries to send an invite event into the room. -#[tracing::instrument(skip_all, fields(%client), name = "invite")] -pub(crate) async fn invite_user_route( - State(services): State, - InsecureClientIp(client): InsecureClientIp, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - if services.users.is_suspended(sender_user).await? 
{ - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - - if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - debug_error!( - "User {sender_user} is not an admin and attempted to send an invite to room {}", - &body.room_id - ); - return Err!(Request(Forbidden("Invites are not allowed on this server."))); - } - - banned_room_check( - &services, - sender_user, - Some(&body.room_id), - body.room_id.server_name(), - client, - ) - .await?; - - match &body.recipient { - | invite_user::v3::InvitationRecipient::UserId { user_id } => { - let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); - let recipient_ignored_by_sender = - services.users.user_is_ignored(user_id, sender_user); - - let (sender_ignored_recipient, recipient_ignored_by_sender) = - join!(sender_ignored_recipient, recipient_ignored_by_sender); - - if sender_ignored_recipient { - return Ok(invite_user::v3::Response {}); - } - - if let Ok(target_user_membership) = services - .rooms - .state_accessor - .get_member(&body.room_id, user_id) - .await - { - if target_user_membership.membership == MembershipState::Ban { - return Err!(Request(Forbidden("User is banned from this room."))); - } - } - - if recipient_ignored_by_sender { - // silently drop the invite to the recipient if they've been ignored by the - // sender, pretend it worked - return Ok(invite_user::v3::Response {}); - } - - invite_helper( - &services, - sender_user, - user_id, - &body.room_id, - body.reason.clone(), - false, - ) - .boxed() - .await?; - - Ok(invite_user::v3::Response {}) - }, - | _ => { - Err!(Request(NotFound("User not found."))) - }, - } -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` -/// -/// Tries to send a kick event into the room. 
-pub(crate) async fn kick_user_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - if services.users.is_suspended(sender_user).await? { - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let Ok(event) = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - else { - // copy synapse's behaviour of returning 200 without any change to the state - // instead of erroring on left users - return Ok(kick_user::v3::Response::new()); - }; - - if !matches!( - event.membership, - MembershipState::Invite | MembershipState::Knock | MembershipState::Join, - ) { - return Err!(Request(Forbidden( - "Cannot kick a user who is not apart of the room (current membership: {})", - event.membership - ))); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - ..event - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(kick_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` -/// -/// Tries to send a ban event into the room. -pub(crate) async fn ban_user_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - - if sender_user == body.user_id { - return Err!(Request(Forbidden("You cannot ban yourself."))); - } - - if services.users.is_suspended(sender_user).await? 
{ - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let current_member_content = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban)); - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Ban, - reason: body.reason.clone(), - displayname: None, // display name may be offensive - avatar_url: None, // avatar may be offensive - is_direct: None, - join_authorized_via_users_server: None, - third_party_invite: None, - redact_events: body.redact_events, - ..current_member_content - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(ban_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` -/// -/// Tries to send an unban event into the room. -pub(crate) async fn unban_user_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - if services.users.is_suspended(sender_user).await? 
{ - return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); - } - let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; - - let current_member_content = services - .rooms - .state_accessor - .get_member(&body.room_id, &body.user_id) - .await - .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave)); - - if current_member_content.membership != MembershipState::Ban { - return Err!(Request(Forbidden( - "Cannot unban a user who is not banned (current membership: {})", - current_member_content.membership - ))); - } - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason: body.reason.clone(), - join_authorized_via_users_server: None, - third_party_invite: None, - is_direct: None, - ..current_member_content - }), - sender_user, - &body.room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(unban_user::v3::Response::new()) -} - -/// # `POST /_matrix/client/v3/rooms/{roomId}/forget` -/// -/// Forgets about a room. 
-/// -/// - If the sender user currently left the room: Stops sender user from -/// receiving information about the room -/// -/// Note: Other devices of the user have no way of knowing the room was -/// forgotten, so this has to be called from every device -pub(crate) async fn forget_room_route( - State(services): State, - body: Ruma, -) -> Result { - let user_id = body.sender_user(); - let room_id = &body.room_id; - - let joined = services.rooms.state_cache.is_joined(user_id, room_id); - let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); - let invited = services.rooms.state_cache.is_invited(user_id, room_id); - - pin_mut!(joined, knocked, invited); - if joined.or(knocked).or(invited).await { - return Err!(Request(Unknown("You must leave the room before forgetting it"))); - } - - let membership = services - .rooms - .state_accessor - .get_member(room_id, user_id) - .await; - - if membership.is_not_found() { - return Err!(Request(Unknown("No membership event was found, room was never joined"))); - } - - let non_membership = membership - .map(|member| member.membership) - .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); - - if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { - services.rooms.state_cache.forget(room_id, user_id); - } - - Ok(forget_room::v3::Response::new()) -} - -/// # `POST /_matrix/client/r0/joined_rooms` -/// -/// Lists all rooms the user has joined. 
-pub(crate) async fn joined_rooms_route( - State(services): State, - body: Ruma, -) -> Result { - Ok(joined_rooms::v3::Response { - joined_rooms: services - .rooms - .state_cache - .rooms_joined(body.sender_user()) - .map(ToOwned::to_owned) - .collect() - .await, - }) -} - -fn membership_filter( - pdu: PduEvent, - for_membership: Option<&MembershipEventFilter>, - not_membership: Option<&MembershipEventFilter>, -) -> Option { - let membership_state_filter = match for_membership { - | Some(MembershipEventFilter::Ban) => MembershipState::Ban, - | Some(MembershipEventFilter::Invite) => MembershipState::Invite, - | Some(MembershipEventFilter::Knock) => MembershipState::Knock, - | Some(MembershipEventFilter::Leave) => MembershipState::Leave, - | Some(_) | None => MembershipState::Join, - }; - - let not_membership_state_filter = match not_membership { - | Some(MembershipEventFilter::Ban) => MembershipState::Ban, - | Some(MembershipEventFilter::Invite) => MembershipState::Invite, - | Some(MembershipEventFilter::Join) => MembershipState::Join, - | Some(MembershipEventFilter::Knock) => MembershipState::Knock, - | Some(_) | None => MembershipState::Leave, - }; - - let evt_membership = pdu.get_content::().ok()?.membership; - - if for_membership.is_some() && not_membership.is_some() { - if membership_state_filter != evt_membership - || not_membership_state_filter == evt_membership - { - None - } else { - Some(pdu) - } - } else if for_membership.is_some() && not_membership.is_none() { - if membership_state_filter != evt_membership { - None - } else { - Some(pdu) - } - } else if not_membership.is_some() && for_membership.is_none() { - if not_membership_state_filter == evt_membership { - None - } else { - Some(pdu) - } - } else { - Some(pdu) - } -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/members` -/// -/// Lists all joined users in a room (TODO: at a specific point in time, with a -/// specific membership). 
-/// -/// - Only works if the user is currently joined -pub(crate) async fn get_member_events_route( - State(services): State, - body: Ruma, -) -> Result { - let sender_user = body.sender_user(); - let membership = body.membership.as_ref(); - let not_membership = body.not_membership.as_ref(); - - if !services - .rooms - .state_accessor - .user_can_see_state_events(sender_user, &body.room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view this room."))); - } - - Ok(get_member_events::v3::Response { - chunk: services - .rooms - .state_accessor - .room_state_full(&body.room_id) - .ready_filter_map(Result::ok) - .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) - .map(at!(1)) - .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) - .map(Event::into_format) - .collect() - .await, - }) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` -/// -/// Lists all members of a room. -/// -/// - The sender user must be in the room -/// - TODO: An appservice just needs a puppet joined -pub(crate) async fn joined_members_route( - State(services): State, - body: Ruma, -) -> Result { - if !services - .rooms - .state_accessor - .user_can_see_state_events(body.sender_user(), &body.room_id) - .await - { - return Err!(Request(Forbidden("You don't have permission to view this room."))); - } - - Ok(joined_members::v3::Response { - joined: services - .rooms - .state_cache - .room_members(&body.room_id) - .map(ToOwned::to_owned) - .broad_then(|user_id| async move { - let member = RoomMember { - display_name: services.users.displayname(&user_id).await.ok(), - avatar_url: services.users.avatar_url(&user_id).await.ok(), - }; - - (user_id, member) - }) - .collect() - .await, - }) -} - -pub async fn join_room_by_id_helper( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - third_party_signed: Option<&ThirdPartySigned>, - appservice_info: &Option, 
-) -> Result { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let user_is_guest = services - .users - .is_deactivated(sender_user) - .await - .unwrap_or(false) - && appservice_info.is_none(); - - if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await { - return Err!(Request(Forbidden("Guests are not allowed to join this room"))); - } - - if services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already joined in {room_id}"); - return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); - } - - let server_in_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await; - - // Only check our known membership if we're already in the room. - // See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855 - let membership = if server_in_room { - services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - } else { - debug!("Ignoring local state for join {room_id}, we aren't in the room yet."); - Ok(RoomMemberEventContent::new(MembershipState::Leave)) - }; - if let Ok(m) = membership { - if m.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); - // TODO: return reason - return Err!(Request(Forbidden("You are banned from the room."))); - } - } - - let local_join = server_in_room - || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); - - if local_join { - join_room_by_id_helper_local( - services, - sender_user, - room_id, - reason, - servers, - third_party_signed, - state_lock, - ) - .boxed() - .await?; - } else { - // Ask a remote server if we are not participating in this room - join_room_by_id_helper_remote( - services, - sender_user, - room_id, - reason, - servers, - third_party_signed, - state_lock, - ) - .boxed() - .await?; - } - - 
Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) -} - -#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] -async fn join_room_by_id_helper_remote( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, - state_lock: RoomMutexGuard, -) -> Result { - info!("Joining {room_id} over federation."); - - let (make_join_response, remote_server) = - make_join_request(services, sender_user, room_id, servers).await?; - - info!("make_join finished"); - - let Some(room_version_id) = make_join_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|e| { - err!(BadServerResponse(warn!( - "Invalid make_join event json received from server: {e:?}" - ))) - })?; - - let join_authorized_via_users_server = { - use RoomVersionId::*; - if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { - join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? 
- .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()) - } else { - None - } - }; - - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - join_authorized_via_users_server: join_authorized_via_users_server.clone(), - ..RoomMemberEventContent::new(MembershipState::Join) - }) - .expect("event is valid, we just created it"), - ); - - // We keep the "event_id" in the pdu only in v1 or - // v2 rooms - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - join_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&join_event_stub, &room_version_id)?; - - // Add event_id back - join_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let mut join_event = join_event_stub; - - info!("Asking {remote_server} for send_join in room {room_id}"); - let send_join_request = federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }; - - let 
send_join_response = match services - .sending - .send_synapse_request(&remote_server, send_join_request) - .await - { - | Ok(response) => response, - | Err(e) => { - error!("send_join failed: {e}"); - return Err(e); - }, - }; - - info!("send_join finished"); - - if join_authorized_via_users_server.is_some() { - if let Some(signed_raw) = &send_join_response.room_state.event { - debug_info!( - "There is a signed event with join_authorized_via_users_server. This room is \ - probably using restricted joins. Adding signature to our event" - ); - - let (signed_event_id, signed_value) = - gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| { - err!(Request(BadJson(warn!( - "Could not convert event to canonical JSON: {e}" - )))) - })?; - - if signed_event_id != event_id { - return Err!(Request(BadJson(warn!( - %signed_event_id, %event_id, - "Server {remote_server} sent event with wrong event ID" - )))); - } - - match signed_value["signatures"] - .as_object() - .ok_or_else(|| { - err!(BadServerResponse(warn!( - "Server {remote_server} sent invalid signatures type" - ))) - }) - .and_then(|e| { - e.get(remote_server.as_str()).ok_or_else(|| { - err!(BadServerResponse(warn!( - "Server {remote_server} did not send its signature for a restricted \ - room" - ))) - }) - }) { - | Ok(signature) => { - join_event - .get_mut("signatures") - .expect("we created a valid pdu") - .as_object_mut() - .expect("we created a valid pdu") - .insert(remote_server.to_string(), signature.clone()); - }, - | Err(e) => { - warn!( - "Server {remote_server} sent invalid signature in send_join signatures \ - for event {signed_value:?}: {e:?}", - ); - }, - } - } - } - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing join event"); - let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?; - - info!("Acquiring server signing keys for response events"); 
- let resp_events = &send_join_response.room_state; - let resp_state = &resp_events.state; - let resp_auth = &resp_events.auth_chain; - services - .server_keys - .acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())) - .await; - - info!("Going through send_join response room_state"); - let cork = services.db.cork_and_flush(); - let state = send_join_response - .room_state - .state - .iter() - .stream() - .then(|pdu| { - services - .server_keys - .validate_and_add_event_id_no_fetch(pdu, &room_version_id) - }) - .ready_filter_map(Result::ok) - .fold(HashMap::new(), |mut state, (event_id, value)| async move { - let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { - | Ok(pdu) => pdu, - | Err(e) => { - debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}"); - return state; - }, - }; - - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - if let Some(state_key) = &pdu.state_key { - let shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) - .await; - - state.insert(shortstatekey, pdu.event_id.clone()); - } - - state - }) - .await; - - drop(cork); - - info!("Going through send_join response auth_chain"); - let cork = services.db.cork_and_flush(); - send_join_response - .room_state - .auth_chain - .iter() - .stream() - .then(|pdu| { - services - .server_keys - .validate_and_add_event_id_no_fetch(pdu, &room_version_id) - }) - .ready_filter_map(Result::ok) - .ready_for_each(|(event_id, value)| { - services.rooms.outlier.add_pdu_outlier(&event_id, &value); - }) - .await; - - drop(cork); - - debug!("Running send_join auth check"); - let fetch_state = &state; - let state_fetch = |k: StateEventType, s: StateKey| async move { - let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; - - let event_id = fetch_state.get(&shortstatekey)?; - services.rooms.timeline.get_pdu(event_id).await.ok() - }; - - let auth_check = state_res::event_auth::auth_check( - 
&state_res::RoomVersion::new(&room_version_id)?, - &parsed_join_pdu, - None, // TODO: third party invite - |k, s| state_fetch(k.clone(), s.into()), - ) - .await - .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; - - if !auth_check { - return Err!(Request(Forbidden("Auth check failed"))); - } - - info!("Compressing state from send_join"); - let compressed: CompressedState = services - .rooms - .state_compressor - .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) - .collect() - .await; - - debug!("Saving compressed state"); - let HashSetCompressStateEvent { - shortstatehash: statehash_before_join, - added, - removed, - } = services - .rooms - .state_compressor - .save_state(room_id, Arc::new(compressed)) - .await?; - - debug!("Forcing state for new room"); - services - .rooms - .state - .force_state(room_id, statehash_before_join, added, removed, &state_lock) - .await?; - - info!("Updating joined counts for new room"); - services - .rooms - .state_cache - .update_joined_count(room_id) - .await; - - // We append to state before appending the pdu, so we don't have a moment in - // time with the pdu without it's state. This is okay because append_pdu can't - // fail. 
- let statehash_after_join = services - .rooms - .state - .append_to_state(&parsed_join_pdu) - .await?; - - info!("Appending new room join event"); - services - .rooms - .timeline - .append_pdu( - &parsed_join_pdu, - join_event, - once(parsed_join_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - info!("Setting final room state for new room"); - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - services - .rooms - .state - .set_room_state(room_id, statehash_after_join, &state_lock); - - Ok(()) -} - -#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] -async fn join_room_by_id_helper_local( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - _third_party_signed: Option<&ThirdPartySigned>, - state_lock: RoomMutexGuard, -) -> Result { - debug_info!("We can join locally"); - - let join_rules_event_content = services - .rooms - .state_accessor - .room_state_get_content::( - room_id, - &StateEventType::RoomJoinRules, - "", - ) - .await; - - let restriction_rooms = match join_rules_event_content { - | Ok(RoomJoinRulesEventContent { - join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), - }) => restricted - .allow - .into_iter() - .filter_map(|a| match a { - | AllowRule::RoomMembership(r) => Some(r.room_id), - | _ => None, - }) - .collect(), - | _ => Vec::new(), - }; - - let join_authorized_via_users_server: Option = { - if restriction_rooms - .iter() - .stream() - .any(|restriction_room_id| { - services - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - }) - .await - { - services - .rooms - .state_cache - .local_users_in_room(room_id) - .filter(|user| { - services.rooms.state_accessor.user_can_invite( - room_id, - user, - sender_user, - &state_lock, - ) - }) - .boxed() - .next() - .await - .map(ToOwned::to_owned) - } 
else { - None - } - }; - - let content = RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: reason.clone(), - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }; - - // Try normal join first - let Err(error) = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(sender_user.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await - else { - return Ok(()); - }; - - if restriction_rooms.is_empty() - && (servers.is_empty() - || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) - { - return Err(error); - } - - warn!( - "We couldn't do the join locally, maybe federation can help to satisfy the restricted \ - join requirements" - ); - let Ok((make_join_response, remote_server)) = - make_join_request(services, sender_user, room_id, servers).await - else { - return Err(error); - }; - - let Some(room_version_id) = make_join_response.room_version else { - return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut join_event_stub: CanonicalJsonObject = - serde_json::from_str(make_join_response.event.get()).map_err(|e| { - err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")) - })?; - - let join_authorized_via_users_server = join_event_stub - .get("content") - .map(|s| { - s.as_object()? - .get("join_authorised_via_users_server")? 
- .as_str() - }) - .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); - - join_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - join_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - join_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - join_authorized_via_users_server, - ..RoomMemberEventContent::new(MembershipState::Join) - }) - .expect("event is valid, we just created it"), - ); - - // We keep the "event_id" in the pdu only in v1 or - // v2 rooms - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - join_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&join_event_stub, &room_version_id)?; - - // Add event_id back - join_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let join_event = join_event_stub; - - let send_join_response = services - .sending - .send_synapse_request( - &remote_server, - federation::membership::create_join_event::v2::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - omit_members: false, - pdu: services - .sending - .convert_to_outgoing_federation_event(join_event.clone()) - .await, - }, - ) - .await?; - - if let Some(signed_raw) = send_join_response.room_state.event 
{ - let (signed_event_id, signed_value) = - gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| { - err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) - })?; - - if signed_event_id != event_id { - return Err!(Request(BadJson( - warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") - ))); - } - - drop(state_lock); - services - .rooms - .event_handler - .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) - .boxed() - .await?; - } else { - return Err(error); - } - - Ok(()) -} - -async fn make_join_request( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - servers: &[OwnedServerName], -) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { - let mut make_join_response_and_server = - Err!(BadServerResponse("No server available to assist in joining.")); - - let mut make_join_counter: usize = 0; - let mut incompatible_room_version_count: usize = 0; - - for remote_server in servers { - if services.globals.server_is_ours(remote_server) { - continue; - } - info!("Asking {remote_server} for make_join ({make_join_counter})"); - let make_join_response = services - .sending - .send_federation_request( - remote_server, - federation::membership::prepare_join_event::v1::Request { - room_id: room_id.to_owned(), - user_id: sender_user.to_owned(), - ver: services.server.supported_room_versions().collect(), - }, - ) - .await; - - trace!("make_join response: {:?}", make_join_response); - make_join_counter = make_join_counter.saturating_add(1); - - if let Err(ref e) = make_join_response { - if matches!( - e.kind(), - ErrorKind::IncompatibleRoomVersion { .. 
} | ErrorKind::UnsupportedRoomVersion - ) { - incompatible_room_version_count = - incompatible_room_version_count.saturating_add(1); - } - - if incompatible_room_version_count > 15 { - info!( - "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \ - M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \ - room version {room_id}: {e}" - ); - make_join_response_and_server = - Err!(BadServerResponse("Room version is not supported by Conduwuit")); - return make_join_response_and_server; - } - - if make_join_counter > 40 { - warn!( - "40 servers failed to provide valid make_join response, assuming no server \ - can assist in joining." - ); - make_join_response_and_server = - Err!(BadServerResponse("No server available to assist in joining.")); - - return make_join_response_and_server; - } - } - - make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); - - if make_join_response_and_server.is_ok() { - break; - } - } - - make_join_response_and_server -} - -pub(crate) async fn invite_helper( - services: &Services, - sender_user: &UserId, - user_id: &UserId, - room_id: &RoomId, - reason: Option, - is_direct: bool, -) -> Result { - if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { - info!( - "User {sender_user} is not an admin and attempted to send an invite to room \ - {room_id}" - ); - return Err!(Request(Forbidden("Invites are not allowed on this server."))); - } - - if !services.globals.user_is_local(user_id) { - let (pdu, pdu_json, invite_room_state) = { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let content = RoomMemberEventContent { - avatar_url: services.users.avatar_url(user_id).await.ok(), - is_direct: Some(is_direct), - reason, - ..RoomMemberEventContent::new(MembershipState::Invite) - }; - - let (pdu, pdu_json) = services - .rooms - .timeline - .create_hash_and_sign_event( - PduBuilder::state(user_id.to_string(), &content), - 
sender_user, - room_id, - &state_lock, - ) - .await?; - - let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; - - drop(state_lock); - - (pdu, pdu_json, invite_room_state) - }; - - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - - let response = services - .sending - .send_federation_request(user_id.server_name(), create_invite::v2::Request { - room_id: room_id.to_owned(), - event_id: (*pdu.event_id).to_owned(), - room_version: room_version_id.clone(), - event: services - .sending - .convert_to_outgoing_federation_event(pdu_json.clone()) - .await, - invite_room_state, - via: services - .rooms - .state_cache - .servers_route_via(room_id) - .await - .ok(), - }) - .await?; - - // We do not add the event_id field to the pdu here because of signature and - // hashes checks - let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id) - .map_err(|e| { - err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) - })?; - - if pdu.event_id != event_id { - return Err!(Request(BadJson(warn!( - %pdu.event_id, %event_id, - "Server {} sent event with wrong event ID", - user_id.server_name() - )))); - } - - let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( - value - .get("origin") - .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, - )?) - .map_err(|e| { - err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) - })?; - - let pdu_id = services - .rooms - .event_handler - .handle_incoming_pdu(&origin, room_id, &event_id, value, true) - .boxed() - .await? 
- .ok_or_else(|| { - err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) - })?; - - return services.sending.send_pdu_room(room_id, &pdu_id).await; - } - - if !services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - return Err!(Request(Forbidden( - "You must be joined in the room you are trying to invite from." - ))); - } - - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let content = RoomMemberEventContent { - displayname: services.users.displayname(user_id).await.ok(), - avatar_url: services.users.avatar_url(user_id).await.ok(), - blurhash: services.users.blurhash(user_id).await.ok(), - is_direct: Some(is_direct), - reason, - ..RoomMemberEventContent::new(MembershipState::Invite) - }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await?; - - drop(state_lock); - - Ok(()) -} - -// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, -// and ignores errors -pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { - let rooms_joined = services - .rooms - .state_cache - .rooms_joined(user_id) - .map(ToOwned::to_owned); - - let rooms_invited = services - .rooms - .state_cache - .rooms_invited(user_id) - .map(|(r, _)| r); - - let rooms_knocked = services - .rooms - .state_cache - .rooms_knocked(user_id) - .map(|(r, _)| r); - - let all_rooms: Vec<_> = rooms_joined - .chain(rooms_invited) - .chain(rooms_knocked) - .collect() - .await; - - for room_id in all_rooms { - // ignore errors - if let Err(e) = leave_room(services, user_id, &room_id, None).boxed().await { - warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); - } - - services.rooms.state_cache.forget(&room_id, user_id); - } -} - -pub async fn leave_room( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - reason: Option, -) -> Result { - let default_member_content = 
RoomMemberEventContent { - membership: MembershipState::Leave, - reason: reason.clone(), - join_authorized_via_users_server: None, - is_direct: None, - avatar_url: None, - displayname: None, - third_party_invite: None, - blurhash: None, - redact_events: None, - }; - - let is_banned = services.rooms.metadata.is_banned(room_id); - let is_disabled = services.rooms.metadata.is_disabled(room_id); - - pin_mut!(is_banned, is_disabled); - if is_banned.or(is_disabled).await { - // the room is banned/disabled, the room must be rejected locally since we - // cant/dont want to federate with this server - services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - None, - None, - true, - ) - .await?; - - return Ok(()); - } - - let dont_have_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .eq(&false); - - let not_knocked = services - .rooms - .state_cache - .is_knocked(user_id, room_id) - .eq(&false); - - // Ask a remote server if we don't have this room and are not knocking on it - if dont_have_room.and(not_knocked).await { - if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) - .boxed() - .await - { - warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); - // Don't tell the client about this error - } - - let last_state = services - .rooms - .state_cache - .invite_state(user_id, room_id) - .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) - .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) - .await - .ok(); - - // We always drop the invite, we can't rely on other servers - services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - last_state, - None, - true, - ) - .await?; - } else { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - let Ok(event) = services - .rooms - .state_accessor - .room_state_get_content::( - 
room_id, - &StateEventType::RoomMember, - user_id.as_str(), - ) - .await - else { - debug_warn!( - "Trying to leave a room you are not a member of, marking room as left locally." - ); - - return services - .rooms - .state_cache - .update_membership( - room_id, - user_id, - default_member_content, - user_id, - None, - None, - true, - ) - .await; - }; - - services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { - membership: MembershipState::Leave, - reason, - join_authorized_via_users_server: None, - is_direct: None, - ..event - }), - user_id, - room_id, - &state_lock, - ) - .await?; - } - - Ok(()) -} - -async fn remote_leave_room( - services: &Services, - user_id: &UserId, - room_id: &RoomId, - reason: Option, -) -> Result<()> { - let mut make_leave_response_and_server = - Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); - - let mut servers: HashSet = services - .rooms - .state_cache - .servers_invite_via(room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - match services - .rooms - .state_cache - .invite_state(user_id, room_id) - .await - { - | Ok(invite_state) => { - servers.extend( - invite_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()), - ); - }, - | _ => { - match services - .rooms - .state_cache - .knock_state(user_id, room_id) - .await - { - | Ok(knock_state) => { - servers.extend( - knock_state - .iter() - .filter_map(|event| event.get_field("sender").ok().flatten()) - .filter_map(|sender: &str| UserId::parse(sender).ok()) - .filter_map(|sender| { - if !services.globals.user_is_local(sender) { - Some(sender.server_name().to_owned()) - } else { - None - } - }), - ); - }, - | _ => {}, - } - }, - } - - if let Some(room_id_server_name) = room_id.server_name() { - servers.insert(room_id_server_name.to_owned()); - } - - 
debug_info!("servers in remote_leave_room: {servers:?}"); - - for remote_server in servers { - let make_leave_response = services - .sending - .send_federation_request( - &remote_server, - federation::membership::prepare_leave_event::v1::Request { - room_id: room_id.to_owned(), - user_id: user_id.to_owned(), - }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let Some(room_version_id) = make_leave_response.room_version else { - return Err!(BadServerResponse(warn!( - "No room version was returned by {remote_server} for {room_id}, room version is \ - likely not supported by conduwuit" - ))); - }; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse(warn!( - "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", - ))); - } - - let mut leave_event_stub = serde_json::from_str::( - make_leave_response.event.get(), - ) - .map_err(|e| { - err!(BadServerResponse(warn!( - "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" - ))) - })?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // Inject the reason key into the event content dict if it exists - if let Some(reason) = reason { - if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { - content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); - } - } - - // room v3 and above removed the "event_id" field from remote PDU format - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - leave_event_stub.remove("event_id"); - }, - } - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; - - // Add event_id back - leave_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - services - .sending - .send_federation_request( - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id: room_id.to_owned(), - event_id, - pdu: services - .sending - .convert_to_outgoing_federation_event(leave_event.clone()) - .await, - }, - ) - .await?; - - Ok(()) -} - -async fn knock_room_by_id_helper( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], -) -> Result { - let state_lock = services.rooms.state.mutex.lock(room_id).await; - - if services - .rooms - .state_cache - .is_invited(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is 
already invited in {room_id} but attempted to knock"); - return Err!(Request(Forbidden( - "You cannot knock on a room you are already invited/accepted to." - ))); - } - - if services - .rooms - .state_cache - .is_joined(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); - return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); - } - - if services - .rooms - .state_cache - .is_knocked(sender_user, room_id) - .await - { - debug_warn!("{sender_user} is already knocked in {room_id}"); - return Ok(knock_room::v3::Response { room_id: room_id.into() }); - } - - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); - return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); - } - } - - // For knock_restricted rooms, check if the user meets the restricted conditions - // If they do, attempt to join instead of knock - // This is not mentioned in the spec, but should be allowable (we're allowed to - // auto-join invites to knocked rooms) - let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; - if let JoinRule::KnockRestricted(restricted) = &join_rule { - let restriction_rooms: Vec<_> = restricted - .allow - .iter() - .filter_map(|a| match a { - | AllowRule::RoomMembership(r) => Some(&r.room_id), - | _ => None, - }) - .collect(); - - // Check if the user is in any of the allowed rooms - let mut user_meets_restrictions = false; - for restriction_room_id in &restriction_rooms { - if services - .rooms - .state_cache - .is_joined(sender_user, restriction_room_id) - .await - { - user_meets_restrictions = true; - break; - } - } - - // If the user meets the restrictions, try joining instead - if user_meets_restrictions { - debug_info!( - 
"{sender_user} meets the restricted criteria in knock_restricted room \ - {room_id}, attempting to join instead of knock" - ); - // For this case, we need to drop the state lock and get a new one in - // join_room_by_id_helper We need to release the lock here and let - // join_room_by_id_helper acquire it again - drop(state_lock); - match join_room_by_id_helper( - services, - sender_user, - room_id, - reason.clone(), - servers, - None, - &None, - ) - .await - { - | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())), - | Err(e) => { - debug_warn!( - "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}" - ); - // Get a new state lock for the remaining knock logic - let new_state_lock = services.rooms.state.mutex.lock(room_id).await; - - let server_in_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await; - - let local_knock = server_in_room - || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); - - if local_knock { - knock_room_helper_local( - services, - sender_user, - room_id, - reason, - servers, - new_state_lock, - ) - .boxed() - .await?; - } else { - knock_room_helper_remote( - services, - sender_user, - room_id, - reason, - servers, - new_state_lock, - ) - .boxed() - .await?; - } - - return Ok(knock_room::v3::Response::new(room_id.to_owned())); - }, - } - } - } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { - debug_warn!( - "{sender_user} attempted to knock on room {room_id} but its join rule is \ - {join_rule:?}, not knock or knock_restricted" - ); - } - - let server_in_room = services - .rooms - .state_cache - .server_in_room(services.globals.server_name(), room_id) - .await; - - let local_knock = server_in_room - || servers.is_empty() - || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); - - if local_knock { - knock_room_helper_local(services, sender_user, room_id, reason, 
servers, state_lock) - .boxed() - .await?; - } else { - knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) - .boxed() - .await?; - } - - Ok(knock_room::v3::Response::new(room_id.to_owned())) -} - -async fn knock_room_helper_local( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - state_lock: RoomMutexGuard, -) -> Result { - debug_info!("We can knock locally"); - - let room_version_id = services.rooms.state.get_room_version(room_id).await?; - - if matches!( - room_version_id, - RoomVersionId::V1 - | RoomVersionId::V2 - | RoomVersionId::V3 - | RoomVersionId::V4 - | RoomVersionId::V5 - | RoomVersionId::V6 - ) { - return Err!(Request(Forbidden("This room does not support knocking."))); - } - - // Verify that this room has a valid knock or knock_restricted join rule - let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; - if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { - return Err!(Request(Forbidden("This room's join rule does not allow knocking."))); - } - - let content = RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason: reason.clone(), - ..RoomMemberEventContent::new(MembershipState::Knock) - }; - - // Try normal knock first - let Err(error) = services - .rooms - .timeline - .build_and_append_pdu( - PduBuilder::state(sender_user.to_string(), &content), - sender_user, - room_id, - &state_lock, - ) - .await - else { - return Ok(()); - }; - - if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) - { - return Err(error); - } - - warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); - - let (make_knock_response, remote_server) = - make_knock_request(services, sender_user, 
room_id, servers).await?; - - info!("make_knock finished"); - - let room_version_id = make_knock_response.room_version; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut knock_event_stub = serde_json::from_str::( - make_knock_response.event.get(), - ) - .map_err(|e| { - err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) - })?; - - knock_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - knock_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - knock_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - ..RoomMemberEventContent::new(MembershipState::Knock) - }) - .expect("event is valid, we just created it"), - ); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; - - // Add event_id - knock_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let knock_event = knock_event_stub; - - info!("Asking {remote_server} for send_knock in room {room_id}"); - let send_knock_request = federation::knock::send_knock::v1::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - pdu: services - .sending 
- .convert_to_outgoing_federation_event(knock_event.clone()) - .await, - }; - - let send_knock_response = services - .sending - .send_federation_request(&remote_server, send_knock_request) - .await?; - - info!("send_knock finished"); - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing knock event"); - - let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; - - info!("Updating membership locally to knock state with provided stripped state events"); - services - .rooms - .state_cache - .update_membership( - room_id, - sender_user, - parsed_knock_pdu - .get_content::() - .expect("we just created this"), - sender_user, - Some(send_knock_response.knock_room_state), - None, - false, - ) - .await?; - - info!("Appending room knock event locally"); - services - .rooms - .timeline - .append_pdu( - &parsed_knock_pdu, - knock_event, - once(parsed_knock_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - Ok(()) -} - -async fn knock_room_helper_remote( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - reason: Option, - servers: &[OwnedServerName], - state_lock: RoomMutexGuard, -) -> Result { - info!("Knocking {room_id} over federation."); - - let (make_knock_response, remote_server) = - make_knock_request(services, sender_user, room_id, servers).await?; - - info!("make_knock finished"); - - let room_version_id = make_knock_response.room_version; - - if !services.server.supported_room_version(&room_version_id) { - return Err!(BadServerResponse( - "Remote room version {room_version_id} is not supported by conduwuit" - )); - } - - let mut knock_event_stub: CanonicalJsonObject = - serde_json::from_str(make_knock_response.event.get()).map_err(|e| { - err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) - })?; - - knock_event_stub.insert( - "origin".to_owned(), - 
CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), - ); - knock_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - knock_event_stub.insert( - "content".to_owned(), - to_canonical_value(RoomMemberEventContent { - displayname: services.users.displayname(sender_user).await.ok(), - avatar_url: services.users.avatar_url(sender_user).await.ok(), - blurhash: services.users.blurhash(sender_user).await.ok(), - reason, - ..RoomMemberEventContent::new(MembershipState::Knock) - }) - .expect("event is valid, we just created it"), - ); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs - // to be present - services - .server_keys - .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; - - // Generate event id - let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; - - // Add event_id - knock_event_stub - .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); - - // It has enough fields to be called a proper event now - let knock_event = knock_event_stub; - - info!("Asking {remote_server} for send_knock in room {room_id}"); - let send_knock_request = federation::knock::send_knock::v1::Request { - room_id: room_id.to_owned(), - event_id: event_id.clone(), - pdu: services - .sending - .convert_to_outgoing_federation_event(knock_event.clone()) - .await, - }; - - let send_knock_response = services - .sending - .send_federation_request(&remote_server, send_knock_request) - .await?; - - info!("send_knock finished"); - - services - .rooms - .short - .get_or_create_shortroomid(room_id) - .await; - - info!("Parsing knock event"); - let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) - .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; - - info!("Going through send_knock response knock state 
events"); - let state = send_knock_response - .knock_room_state - .iter() - .map(|event| serde_json::from_str::(event.clone().into_json().get())) - .filter_map(Result::ok); - - let mut state_map: HashMap = HashMap::new(); - - for event in state { - let Some(state_key) = event.get("state_key") else { - debug_warn!("send_knock stripped state event missing state_key: {event:?}"); - continue; - }; - let Some(event_type) = event.get("type") else { - debug_warn!("send_knock stripped state event missing event type: {event:?}"); - continue; - }; - - let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { - debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); - continue; - }; - let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) - else { - debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); - continue; - }; - - let event_id = gen_event_id(&event, &room_version_id)?; - let shortstatekey = services - .rooms - .short - .get_or_create_shortstatekey(&event_type, &state_key) - .await; - - services.rooms.outlier.add_pdu_outlier(&event_id, &event); - state_map.insert(shortstatekey, event_id.clone()); - } - - info!("Compressing state from send_knock"); - let compressed: CompressedState = services - .rooms - .state_compressor - .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) - .collect() - .await; - - debug!("Saving compressed state"); - let HashSetCompressStateEvent { - shortstatehash: statehash_before_knock, - added, - removed, - } = services - .rooms - .state_compressor - .save_state(room_id, Arc::new(compressed)) - .await?; - - debug!("Forcing state for new room"); - services - .rooms - .state - .force_state(room_id, statehash_before_knock, added, removed, &state_lock) - .await?; - - let statehash_after_knock = services - .rooms - .state - .append_to_state(&parsed_knock_pdu) - .await?; - - info!("Updating membership locally to knock state with 
provided stripped state events"); - services - .rooms - .state_cache - .update_membership( - room_id, - sender_user, - parsed_knock_pdu - .get_content::() - .expect("we just created this"), - sender_user, - Some(send_knock_response.knock_room_state), - None, - false, - ) - .await?; - - info!("Appending room knock event locally"); - services - .rooms - .timeline - .append_pdu( - &parsed_knock_pdu, - knock_event, - once(parsed_knock_pdu.event_id.borrow()), - &state_lock, - ) - .await?; - - info!("Setting final room state for new room"); - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - services - .rooms - .state - .set_room_state(room_id, statehash_after_knock, &state_lock); - - Ok(()) -} - -async fn make_knock_request( - services: &Services, - sender_user: &UserId, - room_id: &RoomId, - servers: &[OwnedServerName], -) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { - let mut make_knock_response_and_server = - Err!(BadServerResponse("No server available to assist in knocking.")); - - let mut make_knock_counter: usize = 0; - - for remote_server in servers { - if services.globals.server_is_ours(remote_server) { - continue; - } - - info!("Asking {remote_server} for make_knock ({make_knock_counter})"); - - let make_knock_response = services - .sending - .send_federation_request( - remote_server, - federation::knock::create_knock_event_template::v1::Request { - room_id: room_id.to_owned(), - user_id: sender_user.to_owned(), - ver: services.server.supported_room_versions().collect(), - }, - ) - .await; - - trace!("make_knock response: {make_knock_response:?}"); - make_knock_counter = make_knock_counter.saturating_add(1); - - make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone())); - - if make_knock_response_and_server.is_ok() { - break; - } - - if make_knock_counter > 40 { - warn!( - "50 servers 
failed to provide valid make_knock response, assuming no server can \ - assist in knocking." - ); - make_knock_response_and_server = - Err!(BadServerResponse("No server available to assist in knocking.")); - - return make_knock_response_and_server; - } - } - - make_knock_response_and_server -} diff --git a/src/api/client/membership/ban.rs b/src/api/client/membership/ban.rs new file mode 100644 index 00000000..339dcf2e --- /dev/null +++ b/src/api/client/membership/ban.rs @@ -0,0 +1,60 @@ +use axum::extract::State; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; +use ruma::{ + api::client::membership::ban_user, + events::room::member::{MembershipState, RoomMemberEventContent}, +}; + +use crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` +/// +/// Tries to send a ban event into the room. +pub(crate) async fn ban_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + + if sender_user == body.user_id { + return Err!(Request(Forbidden("You cannot ban yourself."))); + } + + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let current_member_content = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban)); + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Ban, + reason: body.reason.clone(), + displayname: None, // display name may be offensive + avatar_url: None, // avatar may be offensive + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + redact_events: body.redact_events, + ..current_member_content + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(ban_user::v3::Response::new()) +} diff --git a/src/api/client/membership/forget.rs b/src/api/client/membership/forget.rs new file mode 100644 index 00000000..7f3a1a57 --- /dev/null +++ b/src/api/client/membership/forget.rs @@ -0,0 +1,52 @@ +use axum::extract::State; +use conduwuit::{Err, Result, is_matching, result::NotFound, utils::FutureBoolExt}; +use futures::pin_mut; +use ruma::{api::client::membership::forget_room, events::room::member::MembershipState}; + +use crate::Ruma; + +/// # `POST /_matrix/client/v3/rooms/{roomId}/forget` +/// +/// Forgets about a room. 
+/// +/// - If the sender user currently left the room: Stops sender user from +/// receiving information about the room +/// +/// Note: Other devices of the user have no way of knowing the room was +/// forgotten, so this has to be called from every device +pub(crate) async fn forget_room_route( + State(services): State, + body: Ruma, +) -> Result { + let user_id = body.sender_user(); + let room_id = &body.room_id; + + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + pin_mut!(joined, knocked, invited); + if joined.or(knocked).or(invited).await { + return Err!(Request(Unknown("You must leave the room before forgetting it"))); + } + + let membership = services + .rooms + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { + services.rooms.state_cache.forget(room_id, user_id); + } + + Ok(forget_room::v3::Response::new()) +} diff --git a/src/api/client/membership/invite.rs b/src/api/client/membership/invite.rs new file mode 100644 index 00000000..4ca3efb8 --- /dev/null +++ b/src/api/client/membership/invite.rs @@ -0,0 +1,238 @@ +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug_error, err, info, + matrix::pdu::{PduBuilder, gen_event_id_canonical_json}, +}; +use futures::{FutureExt, join}; +use ruma::{ + OwnedServerName, RoomId, UserId, + api::{client::membership::invite_user, federation::membership::create_invite}, + events::room::member::{MembershipState, RoomMemberEventContent}, +}; +use 
service::Services; + +use super::banned_room_check; +use crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` +/// +/// Tries to send an invite event into the room. +#[tracing::instrument(skip_all, fields(%client), name = "invite")] +pub(crate) async fn invite_user_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { + debug_error!( + "User {sender_user} is not an admin and attempted to send an invite to room {}", + &body.room_id + ); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); + } + + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; + + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); + + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); + + if sender_ignored_recipient { + return Ok(invite_user::v3::Response {}); + } + + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } + + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, 
+ user_id, + &body.room_id, + body.reason.clone(), + false, + ) + .boxed() + .await?; + + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, + } +} + +pub(crate) async fn invite_helper( + services: &Services, + sender_user: &UserId, + user_id: &UserId, + room_id: &RoomId, + reason: Option, + is_direct: bool, +) -> Result { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { + info!( + "User {sender_user} is not an admin and attempted to send an invite to room \ + {room_id}" + ); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); + } + + if !services.globals.user_is_local(user_id) { + let (pdu, pdu_json, invite_room_state) = { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let content = RoomMemberEventContent { + avatar_url: services.users.avatar_url(user_id).await.ok(), + is_direct: Some(is_direct), + reason, + ..RoomMemberEventContent::new(MembershipState::Invite) + }; + + let (pdu, pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder::state(user_id.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await?; + + let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; + + drop(state_lock); + + (pdu, pdu_json, invite_room_state) + }; + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + let response = services + .sending + .send_federation_request(user_id.server_name(), create_invite::v2::Request { + room_id: room_id.to_owned(), + event_id: (*pdu.event_id).to_owned(), + room_version: room_version_id.clone(), + event: services + .sending + .convert_to_outgoing_federation_event(pdu_json.clone()) + .await, + invite_room_state, + via: services + .rooms + .state_cache + .servers_route_via(room_id) + .await + .ok(), + }) + .await?; + + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + 
let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id) + .map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; + + if pdu.event_id != event_id { + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); + } + + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) + .map_err(|e| { + err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) + })?; + + let pdu_id = services + .rooms + .event_handler + .handle_incoming_pdu(&origin, room_id, &event_id, value, true) + .boxed() + .await? + .ok_or_else(|| { + err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) + })?; + + return services.sending.send_pdu_room(room_id, &pdu_id).await; + } + + if !services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + return Err!(Request(Forbidden( + "You must be joined in the room you are trying to invite from." 
+ ))); + } + + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let content = RoomMemberEventContent { + displayname: services.users.displayname(user_id).await.ok(), + avatar_url: services.users.avatar_url(user_id).await.ok(), + blurhash: services.users.blurhash(user_id).await.ok(), + is_direct: Some(is_direct), + reason, + ..RoomMemberEventContent::new(MembershipState::Invite) + }; + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(()) +} diff --git a/src/api/client/membership/join.rs b/src/api/client/membership/join.rs new file mode 100644 index 00000000..669e9399 --- /dev/null +++ b/src/api/client/membership/join.rs @@ -0,0 +1,988 @@ +use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc}; + +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug, debug_info, debug_warn, err, error, info, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + state_res, + }, + result::FlatOk, + trace, + utils::{ + self, shuffle, + stream::{IterStream, ReadyExt}, + }, + warn, +}; +use futures::{FutureExt, StreamExt}; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, + RoomVersionId, UserId, + api::{ + client::{ + error::ErrorKind, + membership::{ThirdPartySigned, join_room_by_id, join_room_by_id_or_alias}, + }, + federation::{self}, + }, + canonical_json::to_canonical_value, + events::{ + StateEventType, + room::{ + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, +}; +use service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; + +use super::banned_room_check; +use 
crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/join` +/// +/// Tries to join the sender user into a room. +/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +#[tracing::instrument(skip_all, fields(%client), name = "join")] +pub(crate) async fn join_room_by_id_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; + + // There is no body.server_name for /roomId/join + let mut servers: Vec<_> = services + .rooms + .state_cache + .servers_invite_via(&body.room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &body.room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = body.room_id.server_name() { + servers.push(server.into()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + join_room_by_id_helper( + &services, + sender_user, + &body.room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + &body.appservice_info, + ) + .boxed() + .await +} + +/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` +/// +/// Tries to join the sender user into a room. 
+/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: use the server name query +/// param if specified. if not specified, asks other servers over federation +/// via room alias server name and room ID server name +#[tracing::instrument(skip_all, fields(%client), name = "join")] +pub(crate) async fn join_room_by_id_or_alias_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let appservice_info = &body.appservice_info; + let body = &body.body; + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .boxed() + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + | Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + 
let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + let join_room_response = join_room_by_id_helper( + &services, + sender_user, + &room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + appservice_info, + ) + .boxed() + .await?; + + Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) +} + +pub async fn join_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + third_party_signed: Option<&ThirdPartySigned>, + appservice_info: &Option, +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let user_is_guest = services + .users + .is_deactivated(sender_user) + .await + .unwrap_or(false) + && appservice_info.is_none(); + + if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await { + return Err!(Request(Forbidden("Guests are not allowed to join this room"))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id}"); + return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); + } + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + // Only check our known membership if we're already in the 
room. + // See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855 + let membership = if server_in_room { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + } else { + debug!("Ignoring local state for join {room_id}, we aren't in the room yet."); + Ok(RoomMemberEventContent::new(MembershipState::Leave)) + }; + if let Ok(m) = membership { + if m.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); + // TODO: return reason + return Err!(Request(Forbidden("You are banned from the room."))); + } + } + + let local_join = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_join { + join_room_by_id_helper_local( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; + } else { + // Ask a remote server if we are not participating in this room + join_room_by_id_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; + } + + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) +} + +#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] +async fn join_room_by_id_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, +) -> Result { + info!("Joining {room_id} over federation."); + + let (make_join_response, remote_server) = + make_join_request(services, sender_user, room_id, servers).await?; + + info!("make_join finished"); + + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + }; + + if !services.server.supported_room_version(&room_version_id) { + return 
Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse(warn!( + "Invalid make_join event json received from server: {e:?}" + ))) + })?; + + let join_authorized_via_users_server = { + use RoomVersionId::*; + if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()) + } else { + None + } + }; + + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + join_authorized_via_users_server: join_authorized_via_users_server.clone(), + ..RoomMemberEventContent::new(MembershipState::Join) + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + join_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&join_event_stub, 
&room_version_id)?; + + // Add event_id back + join_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let mut join_event = join_event_stub; + + info!("Asking {remote_server} for send_join in room {room_id}"); + let send_join_request = federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, + }; + + let send_join_response = match services + .sending + .send_synapse_request(&remote_server, send_join_request) + .await + { + | Ok(response) => response, + | Err(e) => { + error!("send_join failed: {e}"); + return Err(e); + }, + }; + + info!("send_join finished"); + + if join_authorized_via_users_server.is_some() { + if let Some(signed_raw) = &send_join_response.room_state.event { + debug_info!( + "There is a signed event with join_authorized_via_users_server. This room is \ + probably using restricted joins. 
Adding signature to our event" + ); + + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| { + err!(Request(BadJson(warn!( + "Could not convert event to canonical JSON: {e}" + )))) + })?; + + if signed_event_id != event_id { + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); + } + + match signed_value["signatures"] + .as_object() + .ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} sent invalid signatures type" + ))) + }) + .and_then(|e| { + e.get(remote_server.as_str()).ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} did not send its signature for a restricted \ + room" + ))) + }) + }) { + | Ok(signature) => { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + }, + | Err(e) => { + warn!( + "Server {remote_server} sent invalid signature in send_join signatures \ + for event {signed_value:?}: {e:?}", + ); + }, + } + } + } + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing join event"); + let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?; + + info!("Acquiring server signing keys for response events"); + let resp_events = &send_join_response.room_state; + let resp_state = &resp_events.state; + let resp_auth = &resp_events.auth_chain; + services + .server_keys + .acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())) + .await; + + info!("Going through send_join response room_state"); + let cork = services.db.cork_and_flush(); + let state = send_join_response + .room_state + .state + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, 
&room_version_id) + }) + .ready_filter_map(Result::ok) + .fold(HashMap::new(), |mut state, (event_id, value)| async move { + let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { + | Ok(pdu) => pdu, + | Err(e) => { + debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}"); + return state; + }, + }; + + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + if let Some(state_key) = &pdu.state_key { + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) + .await; + + state.insert(shortstatekey, pdu.event_id.clone()); + } + + state + }) + .await; + + drop(cork); + + info!("Going through send_join response auth_chain"); + let cork = services.db.cork_and_flush(); + send_join_response + .room_state + .auth_chain + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, &room_version_id) + }) + .ready_filter_map(Result::ok) + .ready_for_each(|(event_id, value)| { + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + }) + .await; + + drop(cork); + + debug!("Running send_join auth check"); + let fetch_state = &state; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; + + let event_id = fetch_state.get(&shortstatekey)?; + services.rooms.timeline.get_pdu(event_id).await.ok() + }; + + let auth_check = state_res::event_auth::auth_check( + &state_res::RoomVersion::new(&room_version_id)?, + &parsed_join_pdu, + None, // TODO: third party invite + |k, s| state_fetch(k.clone(), s.into()), + ) + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; + + if !auth_check { + return Err!(Request(Forbidden("Auth check failed"))); + } + + info!("Compressing state from send_join"); + let compressed: CompressedState = services + .rooms + .state_compressor + .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, 
eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_join, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_join, added, removed, &state_lock) + .await?; + + info!("Updating joined counts for new room"); + services + .rooms + .state_cache + .update_joined_count(room_id) + .await; + + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. + let statehash_after_join = services + .rooms + .state + .append_to_state(&parsed_join_pdu) + .await?; + + info!("Appending new room join event"); + services + .rooms + .timeline + .append_pdu( + &parsed_join_pdu, + join_event, + once(parsed_join_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_join, &state_lock); + + Ok(()) +} + +#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] +async fn join_room_by_id_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can join locally"); + + let join_rules_event_content = services + .rooms + .state_accessor + .room_state_get_content::( + room_id, + &StateEventType::RoomJoinRules, + "", + ) + .await; + + let restriction_rooms = match join_rules_event_content { + | Ok(RoomJoinRulesEventContent { + join_rule: 
JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), + }) => restricted + .allow + .into_iter() + .filter_map(|a| match a { + | AllowRule::RoomMembership(r) => Some(r.room_id), + | _ => None, + }) + .collect(), + | _ => Vec::new(), + }; + + let join_authorized_via_users_server: Option = { + if restriction_rooms + .iter() + .stream() + .any(|restriction_room_id| { + services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + }) + .await + { + services + .rooms + .state_cache + .local_users_in_room(room_id) + .filter(|user| { + services.rooms.state_accessor.user_can_invite( + room_id, + user, + sender_user, + &state_lock, + ) + }) + .boxed() + .next() + .await + .map(ToOwned::to_owned) + } else { + None + } + }; + + let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }; + + // Try normal join first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if restriction_rooms.is_empty() + && (servers.is_empty() + || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!( + "We couldn't do the join locally, maybe federation can help to satisfy the restricted \ + join requirements" + ); + let Ok((make_join_response, remote_server)) = + make_join_request(services, sender_user, room_id, servers).await + else { + return Err(error); + }; + + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + }; + + if 
!services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")) + })?; + + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + join_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; + + // Add event_id back + join_event_stub + .insert("event_id".to_owned(), 
CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = services + .sending + .send_synapse_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, + }, + ) + .await?; + + if let Some(signed_raw) = send_join_response.room_state.event { + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(&signed_raw, &room_version_id).map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; + + if signed_event_id != event_id { + return Err!(Request(BadJson( + warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") + ))); + } + + drop(state_lock); + services + .rooms + .event_handler + .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) + .boxed() + .await?; + } else { + return Err(error); + } + + Ok(()) +} + +async fn make_join_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { + let mut make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); + + let mut make_join_counter: usize = 0; + let mut incompatible_room_version_count: usize = 0; + + for remote_server in servers { + if services.globals.server_is_ours(remote_server) { + continue; + } + info!("Asking {remote_server} for make_join ({make_join_counter})"); + let make_join_response = services + .sending + .send_federation_request( + remote_server, + federation::membership::prepare_join_event::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: 
services.server.supported_room_versions().collect(), + }, + ) + .await; + + trace!("make_join response: {:?}", make_join_response); + make_join_counter = make_join_counter.saturating_add(1); + + if let Err(ref e) = make_join_response { + if matches!( + e.kind(), + ErrorKind::IncompatibleRoomVersion { .. } | ErrorKind::UnsupportedRoomVersion + ) { + incompatible_room_version_count = + incompatible_room_version_count.saturating_add(1); + } + + if incompatible_room_version_count > 15 { + info!( + "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \ + M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \ + room version {room_id}: {e}" + ); + make_join_response_and_server = + Err!(BadServerResponse("Room version is not supported by Conduwuit")); + return make_join_response_and_server; + } + + if make_join_counter > 40 { + warn!( + "40 servers failed to provide valid make_join response, assuming no server \ + can assist in joining." + ); + make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); + + return make_join_response_and_server; + } + } + + make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); + + if make_join_response_and_server.is_ok() { + break; + } + } + + make_join_response_and_server +} diff --git a/src/api/client/membership/kick.rs b/src/api/client/membership/kick.rs new file mode 100644 index 00000000..5e0e86e2 --- /dev/null +++ b/src/api/client/membership/kick.rs @@ -0,0 +1,65 @@ +use axum::extract::State; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; +use ruma::{ + api::client::membership::kick_user, + events::room::member::{MembershipState, RoomMemberEventContent}, +}; + +use crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` +/// +/// Tries to send a kick event into the room. 
+pub(crate) async fn kick_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let Ok(event) = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + else { + // copy synapse's behaviour of returning 200 without any change to the state + // instead of erroring on left users + return Ok(kick_user::v3::Response::new()); + }; + + if !matches!( + event.membership, + MembershipState::Invite | MembershipState::Knock | MembershipState::Join, + ) { + return Err!(Request(Forbidden( + "Cannot kick a user who is not apart of the room (current membership: {})", + event.membership + ))); + } + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..event + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(kick_user::v3::Response::new()) +} diff --git a/src/api/client/membership/knock.rs b/src/api/client/membership/knock.rs new file mode 100644 index 00000000..544dcfb3 --- /dev/null +++ b/src/api/client/membership/knock.rs @@ -0,0 +1,767 @@ +use std::{borrow::Borrow, collections::HashMap, iter::once, sync::Arc}; + +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, debug, debug_info, debug_warn, err, info, + matrix::pdu::{PduBuilder, PduEvent, gen_event_id}, + result::FlatOk, + trace, + utils::{self, shuffle, stream::IterStream}, + warn, +}; +use futures::{FutureExt, StreamExt}; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, 
OwnedRoomId, OwnedServerName, RoomId, + RoomVersionId, UserId, + api::{ + client::knock::knock_room, + federation::{self}, + }, + canonical_json::to_canonical_value, + events::{ + StateEventType, + room::{ + join_rules::{AllowRule, JoinRule}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, +}; +use service::{ + Services, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; + +use super::{banned_room_check, join::join_room_by_id_helper}; +use crate::Ruma; + +/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` +/// +/// Tries to knock the room to ask permission to join for the sender user. +#[tracing::instrument(skip_all, fields(%client), name = "knock")] +pub(crate) async fn knock_room_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let body = &body.body; + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + 
| Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) + .boxed() + .await +} + +async fn knock_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + if services + .rooms + .state_cache + .is_invited(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already invited in {room_id} but attempted to knock"); + return Err!(Request(Forbidden( + "You cannot knock on a room you are already invited/accepted to." 
+ ))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); + } + + if services + .rooms + .state_cache + .is_knocked(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already knocked in {room_id}"); + return Ok(knock_room::v3::Response { room_id: room_id.into() }); + } + + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + + // For knock_restricted rooms, check if the user meets the restricted conditions + // If they do, attempt to join instead of knock + // This is not mentioned in the spec, but should be allowable (we're allowed to + // auto-join invites to knocked rooms) + let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; + + if let JoinRule::KnockRestricted(restricted) = &join_rule { + let restriction_rooms: Vec<_> = restricted + .allow + .iter() + .filter_map(|a| match a { + | AllowRule::RoomMembership(r) => Some(&r.room_id), + | _ => None, + }) + .collect(); + + // Check if the user is in any of the allowed rooms + let mut user_meets_restrictions = false; + for restriction_room_id in &restriction_rooms { + if services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .await + { + user_meets_restrictions = true; + break; + } + } + + // If the user meets the restrictions, try joining instead + if user_meets_restrictions { + debug_info!( + "{sender_user} meets the restricted criteria in knock_restricted room \ + {room_id}, attempting to join instead of knock" + ); + // For this case, we need to drop 
the state lock and get a new one in + // join_room_by_id_helper We need to release the lock here and let + // join_room_by_id_helper acquire it again + drop(state_lock); + match join_room_by_id_helper( + services, + sender_user, + room_id, + reason.clone(), + servers, + None, + &None, + ) + .await + { + | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())), + | Err(e) => { + debug_warn!( + "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}" + ); + // Get a new state lock for the remaining knock logic + let new_state_lock = services.rooms.state.mutex.lock(room_id).await; + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } else { + knock_room_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } + + return Ok(knock_room::v3::Response::new(room_id.to_owned())); + }, + } + } + } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { + debug_warn!( + "{sender_user} attempted to knock on room {room_id} but its join rule is \ + {join_rule:?}, not knock or knock_restricted" + ); + } + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } else { + knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; 
+ } + + Ok(knock_room::v3::Response::new(room_id.to_owned())) +} + +async fn knock_room_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can knock locally"); + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + if matches!( + room_version_id, + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + ) { + return Err!(Request(Forbidden("This room does not support knocking."))); + } + + let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + ..RoomMemberEventContent::new(MembershipState::Knock) + }; + + // Try normal knock first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub = serde_json::from_str::( + make_knock_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from 
server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock 
event PDU: {e:?}")))?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + once(parsed_knock_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + Ok(()) +} + +async fn knock_room_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + info!("Knocking {room_id} over federation."); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub: CanonicalJsonObject = + serde_json::from_str(make_knock_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: 
services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Going through send_knock response knock state events"); + let state = send_knock_response + .knock_room_state + .iter() + .map(|event| serde_json::from_str::(event.clone().into_json().get())) + .filter_map(Result::ok); + + let mut state_map: HashMap = HashMap::new(); + + for event in state { + let Some(state_key) = event.get("state_key") else { + debug_warn!("send_knock stripped state event missing state_key: {event:?}"); + continue; + }; + let Some(event_type) = event.get("type") else { + 
debug_warn!("send_knock stripped state event missing event type: {event:?}"); + continue; + }; + + let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { + debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); + continue; + }; + let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) + else { + debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); + continue; + }; + + let event_id = gen_event_id(&event, &room_version_id)?; + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&event_type, &state_key) + .await; + + services.rooms.outlier.add_pdu_outlier(&event_id, &event); + state_map.insert(shortstatekey, event_id.clone()); + } + + info!("Compressing state from send_knock"); + let compressed: CompressedState = services + .rooms + .state_compressor + .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_knock, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_knock, added, removed, &state_lock) + .await?; + + let statehash_after_knock = services + .rooms + .state + .append_to_state(&parsed_knock_pdu) + .await?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + 
once(parsed_knock_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_knock, &state_lock); + + Ok(()) +} + +async fn make_knock_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { + let mut make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + let mut make_knock_counter: usize = 0; + + for remote_server in servers { + if services.globals.server_is_ours(remote_server) { + continue; + } + + info!("Asking {remote_server} for make_knock ({make_knock_counter})"); + + let make_knock_response = services + .sending + .send_federation_request( + remote_server, + federation::knock::create_knock_event_template::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services.server.supported_room_versions().collect(), + }, + ) + .await; + + trace!("make_knock response: {make_knock_response:?}"); + make_knock_counter = make_knock_counter.saturating_add(1); + + make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone())); + + if make_knock_response_and_server.is_ok() { + break; + } + + if make_knock_counter > 40 { + warn!( + "50 servers failed to provide valid make_knock response, assuming no server can \ + assist in knocking." 
+ ); + make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + return make_knock_response_and_server; + } + } + + make_knock_response_and_server +} diff --git a/src/api/client/membership/leave.rs b/src/api/client/membership/leave.rs new file mode 100644 index 00000000..a64fb41f --- /dev/null +++ b/src/api/client/membership/leave.rs @@ -0,0 +1,386 @@ +use std::collections::HashSet; + +use axum::extract::State; +use conduwuit::{ + Err, Result, debug_info, debug_warn, err, + matrix::pdu::{PduBuilder, gen_event_id}, + utils::{self, FutureBoolExt, future::ReadyEqExt}, + warn, +}; +use futures::{FutureExt, StreamExt, TryFutureExt, pin_mut}; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, RoomId, RoomVersionId, UserId, + api::{ + client::membership::leave_room, + federation::{self}, + }, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, +}; +use service::Services; + +use crate::Ruma; + +/// # `POST /_matrix/client/v3/rooms/{roomId}/leave` +/// +/// Tries to leave the sender user from a room. +/// +/// - This should always work if the user is currently joined. 
+pub(crate) async fn leave_room_route( + State(services): State, + body: Ruma, +) -> Result { + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .boxed() + .await + .map(|()| leave_room::v3::Response::new()) +} + +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors +pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { + let rooms_joined = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(ToOwned::to_owned); + + let rooms_invited = services + .rooms + .state_cache + .rooms_invited(user_id) + .map(|(r, _)| r); + + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + .chain(rooms_knocked) + .collect() + .await; + + for room_id in all_rooms { + // ignore errors + if let Err(e) = leave_room(services, user_id, &room_id, None).boxed().await { + warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); + } + + services.rooms.state_cache.forget(&room_id, user_id); + } +} + +pub async fn leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + redact_events: None, + }; + + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + 
) + .await?; + + return Ok(()); + } + + let dont_have_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .eq(&false); + + let not_knocked = services + .rooms + .state_cache + .is_knocked(user_id, room_id) + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { + warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); + // Don't tell the client about this error + } + + let last_state = services + .rooms + .state_cache + .invite_state(user_id, room_id) + .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) + .await + .ok(); + + // We always drop the invite, we can't rely on other servers + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + last_state, + None, + true, + ) + .await?; + } else { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let Ok(event) = services + .rooms + .state_accessor + .room_state_get_content::( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .await + else { + debug_warn!( + "Trying to leave a room you are not a member of, marking room as left locally." 
+ ); + + return services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await; + }; + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason, + join_authorized_via_users_server: None, + is_direct: None, + ..event + }), + user_id, + room_id, + &state_lock, + ) + .await?; + } + + Ok(()) +} + +async fn remote_leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result<()> { + let mut make_leave_response_and_server = + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); + + let mut servers: HashSet = services + .rooms + .state_cache + .servers_invite_via(room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + match services + .rooms + .state_cache + .invite_state(user_id, room_id) + .await + { + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, + } + + if let Some(room_id_server_name) = room_id.server_name() { + servers.insert(room_id_server_name.to_owned()); + } + + debug_info!("servers in remote_leave_room: {servers:?}"); + + for remote_server in servers { + let make_leave_response = services + .sending + .send_federation_request( + 
&remote_server, + federation::membership::prepare_leave_event::v1::Request { + room_id: room_id.to_owned(), + user_id: user_id.to_owned(), + }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let Some(room_version_id) = make_leave_response.room_version else { + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); + }; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); + } + + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) + })?; + + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + leave_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; + + // Add event_id back + leave_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + services + .sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id: room_id.to_owned(), + event_id, + pdu: services + .sending + .convert_to_outgoing_federation_event(leave_event.clone()) + .await, + }, + ) + .await?; + + Ok(()) +} diff --git a/src/api/client/membership/members.rs b/src/api/client/membership/members.rs new file mode 100644 index 00000000..4a7abf6d --- /dev/null +++ b/src/api/client/membership/members.rs @@ -0,0 +1,147 @@ +use axum::extract::State; +use conduwuit::{ + Err, Event, Result, at, + matrix::pdu::PduEvent, + utils::{ + future::TryExtExt, + 
stream::{BroadbandExt, ReadyExt}, + }, +}; +use futures::{StreamExt, future::join}; +use ruma::{ + api::client::membership::{ + get_member_events::{self, v3::MembershipEventFilter}, + joined_members::{self, v3::RoomMember}, + }, + events::{ + StateEventType, + room::member::{MembershipState, RoomMemberEventContent}, + }, +}; + +use crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/members` +/// +/// Lists all joined users in a room (TODO: at a specific point in time, with a +/// specific membership). +/// +/// - Only works if the user is currently joined +pub(crate) async fn get_member_events_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); + + if !services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id) + .await + { + return Err!(Request(Forbidden("You don't have permission to view this room."))); + } + + Ok(get_member_events::v3::Response { + chunk: services + .rooms + .state_accessor + .room_state_full(&body.room_id) + .ready_filter_map(Result::ok) + .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) + .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) + .map(Event::into_format) + .collect() + .await, + }) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` +/// +/// Lists all members of a room. 
+/// +/// - The sender user must be in the room +/// - TODO: An appservice just needs a puppet joined +pub(crate) async fn joined_members_route( + State(services): State, + body: Ruma, +) -> Result { + if !services + .rooms + .state_accessor + .user_can_see_state_events(body.sender_user(), &body.room_id) + .await + { + return Err!(Request(Forbidden("You don't have permission to view this room."))); + } + + Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let (display_name, avatar_url) = join( + services.users.displayname(&user_id).ok(), + services.users.avatar_url(&user_id).ok(), + ) + .await; + + (user_id, RoomMember { display_name, avatar_url }) + }) + .collect() + .await, + }) +} + +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if 
for_membership.is_some() && not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} diff --git a/src/api/client/membership/mod.rs b/src/api/client/membership/mod.rs new file mode 100644 index 00000000..7a6f19ad --- /dev/null +++ b/src/api/client/membership/mod.rs @@ -0,0 +1,156 @@ +mod ban; +mod forget; +mod invite; +mod join; +mod kick; +mod knock; +mod leave; +mod members; +mod unban; + +use std::net::IpAddr; + +use axum::extract::State; +use conduwuit::{Err, Result, warn}; +use futures::{FutureExt, StreamExt}; +use ruma::{OwnedRoomId, RoomId, ServerName, UserId, api::client::membership::joined_rooms}; +use service::Services; + +pub(crate) use self::{ + ban::ban_user_route, + forget::forget_room_route, + invite::{invite_helper, invite_user_route}, + join::{join_room_by_id_or_alias_route, join_room_by_id_route}, + kick::kick_user_route, + knock::knock_room_route, + leave::leave_room_route, + members::{get_member_events_route, joined_members_route}, + unban::unban_user_route, +}; +pub use self::{ + join::join_room_by_id_helper, + leave::{leave_all_rooms, leave_room}, +}; +use crate::{Ruma, client::full_user_deactivate}; + +/// # `POST /_matrix/client/r0/joined_rooms` +/// +/// Lists all rooms the user has joined. +pub(crate) async fn joined_rooms_route( + State(services): State, + body: Ruma, +) -> Result { + Ok(joined_rooms::v3::Response { + joined_rooms: services + .rooms + .state_cache + .rooms_joined(body.sender_user()) + .map(ToOwned::to_owned) + .collect() + .await, + }) +} + +/// Checks if the room is banned in any way possible and the sender user is not +/// an admin. 
+/// +/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is +/// enabled +#[tracing::instrument(skip(services))] +pub(crate) async fn banned_room_check( + services: &Services, + user_id: &UserId, + room_id: Option<&RoomId>, + server_name: Option<&ServerName>, + client_ip: IpAddr, +) -> Result { + if services.users.is_admin(user_id).await { + return Ok(()); + } + + if let Some(room_id) = room_id { + if services.rooms.metadata.is_banned(room_id).await + || services + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) + { + warn!( + "User {user_id} who is not an admin attempted to send an invite for or \ + attempted to join a banned room or banned room server name: {room_id}" + ); + + if services.server.config.auto_deactivate_banned_room_attempts { + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); + + if services.server.config.admin_room_notices { + services + .admin + .send_text(&format!( + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" + )) + .await; + } + + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; + } + + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + } else if let Some(server_name) = server_name { + if services + .config + .forbidden_remote_server_names + .is_match(server_name.host()) + { + warn!( + "User {user_id} who is not an admin tried joining a room which has the server \ + name {server_name} that is globally forbidden. 
Rejecting.", + ); + + if services.server.config.auto_deactivate_banned_room_attempts { + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); + + if services.server.config.admin_room_notices { + services + .admin + .send_text(&format!( + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" + )) + .await; + } + + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; + } + + return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); + } + } + + Ok(()) +} diff --git a/src/api/client/membership/unban.rs b/src/api/client/membership/unban.rs new file mode 100644 index 00000000..34c5eace --- /dev/null +++ b/src/api/client/membership/unban.rs @@ -0,0 +1,58 @@ +use axum::extract::State; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; +use ruma::{ + api::client::membership::unban_user, + events::room::member::{MembershipState, RoomMemberEventContent}, +}; + +use crate::Ruma; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` +/// +/// Tries to send an unban event into the room. +pub(crate) async fn unban_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let current_member_content = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave)); + + if current_member_content.membership != MembershipState::Ban { + return Err!(Request(Forbidden( + "Cannot unban a user who is not banned (current membership: {})", + current_member_content.membership + ))); + } + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + third_party_invite: None, + is_direct: None, + ..current_member_content + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(unban_user::v3::Response::new()) +} diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 6efad64e..ff8c2a0b 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -4,11 +4,14 @@ use axum::extract::State; use conduwuit::{ Err, Result, matrix::pdu::PduBuilder, - utils::{IterStream, stream::TryIgnore}, + utils::{IterStream, future::TryExtExt, stream::TryIgnore}, warn, }; use conduwuit_service::Services; -use futures::{StreamExt, TryStreamExt, future::join3}; +use futures::{ + StreamExt, TryStreamExt, + future::{join, join3, join4}, +}; use ruma::{ OwnedMxcUri, OwnedRoomId, UserId, api::{ @@ -214,10 +217,13 @@ pub(crate) async fn get_avatar_url_route( return Err!(Request(NotFound("Profile was not found."))); } - Ok(get_avatar_url::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id).await.ok(), - blurhash: services.users.blurhash(&body.user_id).await.ok(), - }) + let (avatar_url, blurhash) = join( + 
services.users.avatar_url(&body.user_id).ok(), + services.users.blurhash(&body.user_id).ok(), + ) + .await; + + Ok(get_avatar_url::v3::Response { avatar_url, blurhash }) } /// # `GET /_matrix/client/v3/profile/{userId}` @@ -297,11 +303,19 @@ pub(crate) async fn get_profile_route( custom_profile_fields.remove("us.cloke.msc4175.tz"); custom_profile_fields.remove("m.tz"); + let (avatar_url, blurhash, displayname, tz) = join4( + services.users.avatar_url(&body.user_id).ok(), + services.users.blurhash(&body.user_id).ok(), + services.users.displayname(&body.user_id).ok(), + services.users.timezone(&body.user_id).ok(), + ) + .await; + Ok(get_profile::v3::Response { - avatar_url: services.users.avatar_url(&body.user_id).await.ok(), - blurhash: services.users.blurhash(&body.user_id).await.ok(), - displayname: services.users.displayname(&body.user_id).await.ok(), - tz: services.users.timezone(&body.user_id).await.ok(), + avatar_url, + blurhash, + displayname, + tz, custom_profile_fields, }) } @@ -313,16 +327,12 @@ pub async fn update_displayname( all_joined_rooms: &[OwnedRoomId], ) { let (current_avatar_url, current_blurhash, current_displayname) = join3( - services.users.avatar_url(user_id), - services.users.blurhash(user_id), - services.users.displayname(user_id), + services.users.avatar_url(user_id).ok(), + services.users.blurhash(user_id).ok(), + services.users.displayname(user_id).ok(), ) .await; - let current_avatar_url = current_avatar_url.ok(); - let current_blurhash = current_blurhash.ok(); - let current_displayname = current_displayname.ok(); - if displayname == current_displayname { return; } @@ -366,16 +376,12 @@ pub async fn update_avatar_url( all_joined_rooms: &[OwnedRoomId], ) { let (current_avatar_url, current_blurhash, current_displayname) = join3( - services.users.avatar_url(user_id), - services.users.blurhash(user_id), - services.users.displayname(user_id), + services.users.avatar_url(user_id).ok(), + services.users.blurhash(user_id).ok(), + 
services.users.displayname(user_id).ok(), ) .await; - let current_avatar_url = current_avatar_url.ok(); - let current_blurhash = current_blurhash.ok(); - let current_displayname = current_displayname.ok(); - if current_avatar_url == avatar_url && current_blurhash == blurhash { return; } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 8b9f3ca0..c2f59d4c 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,9 +1,9 @@ use axum::extract::State; use conduwuit::{ Err, Event, Result, at, - utils::{BoolExt, stream::TryTools}, + utils::{BoolExt, future::TryExtExt, stream::TryTools}, }; -use futures::TryStreamExt; +use futures::{FutureExt, TryStreamExt, future::try_join4}; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; use crate::Ruma; @@ -25,22 +25,31 @@ pub(crate) async fn room_initial_sync_route( return Err!(Request(Forbidden("No room preview available."))); } - let limit = LIMIT_MAX; - let events: Vec<_> = services + let membership = services .rooms - .timeline - .pdus_rev(None, room_id, None) - .try_take(limit) - .try_collect() - .await?; + .state_cache + .user_membership(body.sender_user(), room_id) + .map(Ok); - let state: Vec<_> = services + let visibility = services.rooms.directory.visibility(room_id).map(Ok); + + let state = services .rooms .state_accessor .room_state_full_pdus(room_id) .map_ok(Event::into_format) - .try_collect() - .await?; + .try_collect::>(); + + let limit = LIMIT_MAX; + let events = services + .rooms + .timeline + .pdus_rev(None, room_id, None) + .try_take(limit) + .try_collect::>(); + + let (membership, visibility, state, events) = + try_join4(membership, visibility, state, events).await?; let messages = PaginationChunk { start: events.last().map(at!(0)).as_ref().map(ToString::to_string), @@ -64,11 +73,7 @@ pub(crate) async fn room_initial_sync_route( account_data: None, state: state.into(), messages: 
messages.chunk.is_empty().or_some(messages), - visibility: services.rooms.directory.visibility(room_id).await.into(), - membership: services - .rooms - .state_cache - .user_membership(body.sender_user(), room_id) - .await, + visibility: visibility.into(), + membership, }) } From 364293608de928c3acd8e7253522ca31713c8435 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sun, 27 Apr 2025 02:39:28 +0000 Subject: [PATCH 079/270] Post-formatting aesthetic and spacing corrections Signed-off-by: Jason Volk --- src/admin/debug/commands.rs | 4 +- src/admin/user/commands.rs | 17 ++-- src/api/client/account.rs | 32 +++++-- src/api/client/directory.rs | 6 +- src/api/client/membership/invite.rs | 2 +- src/api/client/membership/join.rs | 3 +- src/api/client/membership/knock.rs | 5 +- src/api/client/membership/leave.rs | 2 +- src/api/client/membership/members.rs | 10 +- src/api/client/message.rs | 33 ++++--- src/api/client/profile.rs | 5 - src/api/client/relations.rs | 18 ++-- src/api/client/report.rs | 1 + src/api/client/room/initial_sync.rs | 4 +- src/api/client/room/summary.rs | 4 +- src/api/client/room/upgrade.rs | 4 +- src/api/client/sync/v4.rs | 4 +- src/api/server/invite.rs | 6 +- src/api/server/send_join.rs | 2 +- src/api/server/send_knock.rs | 2 +- src/api/server/send_leave.rs | 2 +- src/core/config/proxy.rs | 5 +- src/core/info/cargo.rs | 6 +- src/core/matrix/event.rs | 90 ++++++++++++++++-- src/core/matrix/event/filter.rs | 93 +++++++++++++++++++ .../matrix/{pdu/event_id.rs => event/id.rs} | 0 src/core/matrix/event/relation.rs | 28 ++++++ src/core/matrix/event/unsigned.rs | 51 ++++++++++ src/core/matrix/pdu.rs | 28 ++++-- src/core/matrix/pdu/content.rs | 20 ---- src/core/matrix/pdu/filter.rs | 90 ------------------ src/core/matrix/pdu/redact.rs | 4 +- src/core/matrix/pdu/relation.rs | 22 ----- src/core/matrix/pdu/unsigned.rs | 43 +-------- src/core/matrix/state_res/mod.rs | 10 +- src/core/mods/module.rs | 1 + src/core/mods/path.rs | 1 + src/core/utils/html.rs | 2 + 
src/core/utils/json.rs | 28 +++--- src/core/utils/time.rs | 3 - src/database/watchers.rs | 1 - src/main/logging.rs | 13 +++ src/main/mods.rs | 4 + src/main/sentry.rs | 8 +- src/router/request.rs | 2 +- src/service/admin/mod.rs | 6 +- src/service/pusher/mod.rs | 6 +- src/service/rooms/alias/mod.rs | 4 +- .../fetch_and_handle_outliers.rs | 35 ++++--- src/service/rooms/event_handler/fetch_prev.rs | 49 ++++++---- .../rooms/event_handler/fetch_state.rs | 25 ++--- .../event_handler/handle_incoming_pdu.rs | 15 +-- .../rooms/event_handler/handle_outlier_pdu.rs | 35 ++++--- .../rooms/event_handler/handle_prev_pdu.rs | 18 ++-- src/service/rooms/event_handler/mod.rs | 12 +-- .../rooms/event_handler/parse_incoming_pdu.rs | 4 +- .../rooms/event_handler/state_at_incoming.rs | 48 ++++++---- .../event_handler/upgrade_outlier_pdu.rs | 50 +++++----- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/data.rs | 10 +- src/service/rooms/pdu_metadata/mod.rs | 19 ++-- src/service/rooms/read_receipt/mod.rs | 10 +- src/service/rooms/search/mod.rs | 12 ++- src/service/rooms/state/mod.rs | 4 +- .../rooms/state_accessor/room_state.rs | 8 +- src/service/rooms/state_accessor/state.rs | 18 ++-- src/service/rooms/state_accessor/user_can.rs | 14 +-- src/service/rooms/threads/mod.rs | 13 ++- src/service/rooms/timeline/data.rs | 1 - src/service/rooms/timeline/mod.rs | 89 +++++++++--------- src/service/sending/sender.rs | 2 +- src/service/server_keys/verify.rs | 2 +- 72 files changed, 704 insertions(+), 528 deletions(-) create mode 100644 src/core/matrix/event/filter.rs rename src/core/matrix/{pdu/event_id.rs => event/id.rs} (100%) create mode 100644 src/core/matrix/event/relation.rs create mode 100644 src/core/matrix/event/unsigned.rs delete mode 100644 src/core/matrix/pdu/content.rs delete mode 100644 src/core/matrix/pdu/filter.rs delete mode 100644 src/core/matrix/pdu/relation.rs diff --git a/src/admin/debug/commands.rs b/src/admin/debug/commands.rs index 74355311..81b0e9da 
100644 --- a/src/admin/debug/commands.rs +++ b/src/admin/debug/commands.rs @@ -558,8 +558,8 @@ pub(super) async fn force_set_room_state_from_server( .latest_pdu_in_room(&room_id) .await .map_err(|_| err!(Database("Failed to find the latest PDU in database")))? - .event_id - .clone(), + .event_id() + .to_owned(), }; let room_version = self.services.rooms.state.get_room_version(&room_id).await?; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index e15c0b2c..86206c2b 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -738,7 +738,7 @@ pub(super) async fn force_demote(&self, user_id: String, room_id: OwnedRoomOrAli .state_accessor .room_state_get(&room_id, &StateEventType::RoomCreate, "") .await - .is_ok_and(|event| event.sender == user_id); + .is_ok_and(|event| event.sender() == user_id); if !user_can_demote_self { return Err!("User is not allowed to modify their own power levels in the room.",); @@ -889,10 +889,7 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { return Err!("Event is already redacted."); } - let room_id = event.room_id; - let sender_user = event.sender; - - if !self.services.globals.user_is_local(&sender_user) { + if !self.services.globals.user_is_local(event.sender()) { return Err!("This command only works on local users."); } @@ -902,21 +899,21 @@ pub(super) async fn redact_event(&self, event_id: OwnedEventId) -> Result { ); let redaction_event_id = { - let state_lock = self.services.rooms.state.mutex.lock(&room_id).await; + let state_lock = self.services.rooms.state.mutex.lock(event.room_id()).await; self.services .rooms .timeline .build_and_append_pdu( PduBuilder { - redacts: Some(event.event_id.clone()), + redacts: Some(event.event_id().to_owned()), ..PduBuilder::timeline(&RoomRedactionEventContent { - redacts: Some(event.event_id.clone()), + redacts: Some(event.event_id().to_owned()), reason: Some(reason), }) }, - &sender_user, - &room_id, + event.sender(), + 
event.room_id(), &state_lock, ) .await? diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 14bbcf98..df938c17 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -3,10 +3,9 @@ use std::fmt::Write; use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Error, Result, debug_info, err, error, info, is_equal_to, + Err, Error, Event, Result, debug_info, err, error, info, is_equal_to, matrix::pdu::PduBuilder, - utils, - utils::{ReadyExt, stream::BroadbandExt}, + utils::{self, ReadyExt, stream::BroadbandExt}, warn, }; use conduwuit_service::Services; @@ -140,16 +139,32 @@ pub(crate) async fn register_route( if !services.config.allow_registration && body.appservice_info.is_none() { match (body.username.as_ref(), body.initial_device_display_name.as_ref()) { | (Some(username), Some(device_display_name)) => { - info!(%is_guest, user = %username, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + info!( + %is_guest, + user = %username, + device_name = %device_display_name, + "Rejecting registration attempt as registration is disabled" + ); }, | (Some(username), _) => { - info!(%is_guest, user = %username, "Rejecting registration attempt as registration is disabled"); + info!( + %is_guest, + user = %username, + "Rejecting registration attempt as registration is disabled" + ); }, | (_, Some(device_display_name)) => { - info!(%is_guest, device_name = %device_display_name, "Rejecting registration attempt as registration is disabled"); + info!( + %is_guest, + device_name = %device_display_name, + "Rejecting registration attempt as registration is disabled" + ); }, | (None, _) => { - info!(%is_guest, "Rejecting registration attempt as registration is disabled"); + info!( + %is_guest, + "Rejecting registration attempt as registration is disabled" + ); }, } @@ -835,6 +850,7 @@ pub async fn full_user_deactivate( all_joined_rooms: &[OwnedRoomId], ) -> Result<()> 
{ services.users.deactivate_account(user_id).await.ok(); + super::update_displayname(services, user_id, None, all_joined_rooms).await; super::update_avatar_url(services, user_id, None, None, all_joined_rooms).await; @@ -871,7 +887,7 @@ pub async fn full_user_deactivate( .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "") .await - .is_ok_and(|event| event.sender == user_id); + .is_ok_and(|event| event.sender() == user_id); if user_can_demote_self { let mut power_levels_content = room_power_levels.unwrap_or_default(); diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index 2e219fd9..00879274 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -1,7 +1,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ - Err, Result, err, info, + Err, Event, Result, err, info, utils::{ TryFutureExtExt, math::Expected, @@ -352,7 +352,7 @@ async fn user_can_publish_room( .room_state_get(room_id, &StateEventType::RoomPowerLevels, "") .await { - | Ok(event) => serde_json::from_str(event.content.get()) + | Ok(event) => serde_json::from_str(event.content().get()) .map_err(|_| err!(Database("Invalid event content for m.room.power_levels"))) .map(|content: RoomPowerLevelsEventContent| { RoomPowerLevels::from(content) @@ -365,7 +365,7 @@ async fn user_can_publish_room( .room_state_get(room_id, &StateEventType::RoomCreate, "") .await { - | Ok(event) => Ok(event.sender == user_id), + | Ok(event) => Ok(event.sender() == user_id), | _ => Err!(Request(Forbidden("User is not allowed to publish this room"))), } }, diff --git a/src/api/client/membership/invite.rs b/src/api/client/membership/invite.rs index 4ca3efb8..018fb774 100644 --- a/src/api/client/membership/invite.rs +++ b/src/api/client/membership/invite.rs @@ -2,7 +2,7 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, Result, debug_error, err, info, - matrix::pdu::{PduBuilder, 
gen_event_id_canonical_json}, + matrix::{event::gen_event_id_canonical_json, pdu::PduBuilder}, }; use futures::{FutureExt, join}; use ruma::{ diff --git a/src/api/client/membership/join.rs b/src/api/client/membership/join.rs index 669e9399..9d19d3bc 100644 --- a/src/api/client/membership/join.rs +++ b/src/api/client/membership/join.rs @@ -6,7 +6,8 @@ use conduwuit::{ Err, Result, debug, debug_info, debug_warn, err, error, info, matrix::{ StateKey, - pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + event::{gen_event_id, gen_event_id_canonical_json}, + pdu::{PduBuilder, PduEvent}, state_res, }, result::FlatOk, diff --git a/src/api/client/membership/knock.rs b/src/api/client/membership/knock.rs index 544dcfb3..79f16631 100644 --- a/src/api/client/membership/knock.rs +++ b/src/api/client/membership/knock.rs @@ -4,7 +4,10 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use conduwuit::{ Err, Result, debug, debug_info, debug_warn, err, info, - matrix::pdu::{PduBuilder, PduEvent, gen_event_id}, + matrix::{ + event::{Event, gen_event_id}, + pdu::{PduBuilder, PduEvent}, + }, result::FlatOk, trace, utils::{self, shuffle, stream::IterStream}, diff --git a/src/api/client/membership/leave.rs b/src/api/client/membership/leave.rs index a64fb41f..f4f1666b 100644 --- a/src/api/client/membership/leave.rs +++ b/src/api/client/membership/leave.rs @@ -3,7 +3,7 @@ use std::collections::HashSet; use axum::extract::State; use conduwuit::{ Err, Result, debug_info, debug_warn, err, - matrix::pdu::{PduBuilder, gen_event_id}, + matrix::{event::gen_event_id, pdu::PduBuilder}, utils::{self, FutureBoolExt, future::ReadyEqExt}, warn, }; diff --git a/src/api/client/membership/members.rs b/src/api/client/membership/members.rs index 4a7abf6d..05ba1c43 100644 --- a/src/api/client/membership/members.rs +++ b/src/api/client/membership/members.rs @@ -1,13 +1,12 @@ use axum::extract::State; use conduwuit::{ Err, Event, Result, at, - matrix::pdu::PduEvent, 
utils::{ future::TryExtExt, stream::{BroadbandExt, ReadyExt}, }, }; -use futures::{StreamExt, future::join}; +use futures::{FutureExt, StreamExt, future::join}; use ruma::{ api::client::membership::{ get_member_events::{self, v3::MembershipEventFilter}, @@ -55,6 +54,7 @@ pub(crate) async fn get_member_events_route( .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) .map(Event::into_format) .collect() + .boxed() .await, }) } @@ -98,11 +98,11 @@ pub(crate) async fn joined_members_route( }) } -fn membership_filter( - pdu: PduEvent, +fn membership_filter( + pdu: Pdu, for_membership: Option<&MembershipEventFilter>, not_membership: Option<&MembershipEventFilter>, -) -> Option { +) -> Option { let membership_state_filter = match for_membership { | Some(MembershipEventFilter::Ban) => MembershipState::Ban, | Some(MembershipEventFilter::Invite) => MembershipState::Invite, diff --git a/src/api/client/message.rs b/src/api/client/message.rs index e32d020f..f8818ebb 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -2,9 +2,10 @@ use axum::extract::State; use conduwuit::{ Err, Result, at, matrix::{ - Event, - pdu::{PduCount, PduEvent}, + event::{Event, Matches}, + pdu::PduCount, }, + ref_at, utils::{ IterStream, ReadyExt, result::{FlatOk, LogErr}, @@ -216,7 +217,9 @@ where pin_mut!(receipts); let witness: Witness = events .stream() - .map(|(_, pdu)| pdu.sender.clone()) + .map(ref_at!(1)) + .map(Event::sender) + .map(ToOwned::to_owned) .chain( receipts .ready_take_while(|(_, c, _)| *c <= newest.into_unsigned()) @@ -261,27 +264,33 @@ pub(crate) async fn ignored_filter( } #[inline] -pub(crate) async fn is_ignored_pdu( +pub(crate) async fn is_ignored_pdu( services: &Services, - pdu: &PduEvent, + event: &Pdu, user_id: &UserId, -) -> bool { +) -> bool +where + Pdu: Event + Send + Sync, +{ // exclude Synapse's dummy events from bloating up response bodies. clients // don't need to see this. 
- if pdu.kind.to_cow_str() == "org.matrix.dummy_event" { + if event.kind().to_cow_str() == "org.matrix.dummy_event" { return true; } - let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(&pdu.kind).is_ok(); + let ignored_type = IGNORED_MESSAGE_TYPES.binary_search(event.kind()).is_ok(); let ignored_server = services .moderation - .is_remote_server_ignored(pdu.sender().server_name()); + .is_remote_server_ignored(event.sender().server_name()); if ignored_type && (ignored_server || (!services.config.send_messages_from_ignored_users_to_client - && services.users.user_is_ignored(&pdu.sender, user_id).await)) + && services + .users + .user_is_ignored(event.sender(), user_id) + .await)) { return true; } @@ -300,7 +309,7 @@ pub(crate) async fn visibility_filter( services .rooms .state_accessor - .user_can_see_event(user_id, &pdu.room_id, &pdu.event_id) + .user_can_see_event(user_id, pdu.room_id(), pdu.event_id()) .await .then_some(item) } @@ -308,7 +317,7 @@ pub(crate) async fn visibility_filter( #[inline] pub(crate) fn event_filter(item: PdusIterItem, filter: &RoomEventFilter) -> Option { let (_, pdu) = &item; - pdu.matches(filter).then_some(item) + filter.matches(pdu).then_some(item) } #[cfg_attr(debug_assertions, conduwuit::ctor)] diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index ff8c2a0b..1882495c 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -195,11 +195,9 @@ pub(crate) async fn get_avatar_url_route( services .users .set_displayname(&body.user_id, response.displayname.clone()); - services .users .set_avatar_url(&body.user_id, response.avatar_url.clone()); - services .users .set_blurhash(&body.user_id, response.blurhash.clone()); @@ -256,15 +254,12 @@ pub(crate) async fn get_profile_route( services .users .set_displayname(&body.user_id, response.displayname.clone()); - services .users .set_avatar_url(&body.user_id, response.avatar_url.clone()); - services .users .set_blurhash(&body.user_id, 
response.blurhash.clone()); - services .users .set_timezone(&body.user_id, response.tz.clone()); diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index ad726b90..1aa34ada 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,10 +1,10 @@ use axum::extract::State; use conduwuit::{ Result, at, - matrix::{Event, pdu::PduCount}, + matrix::{Event, event::RelationTypeEqual, pdu::PduCount}, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; -use conduwuit_service::{Services, rooms::timeline::PdusIterItem}; +use conduwuit_service::Services; use futures::StreamExt; use ruma::{ EventId, RoomId, UInt, UserId, @@ -129,7 +129,7 @@ async fn paginate_relations_with_filter( // Spec (v1.10) recommends depth of at least 3 let depth: u8 = if recurse { 3 } else { 1 }; - let events: Vec = services + let events: Vec<_> = services .rooms .pdu_metadata .get_relations(sender_user, room_id, target, start, limit, depth, dir) @@ -138,12 +138,12 @@ async fn paginate_relations_with_filter( .filter(|(_, pdu)| { filter_event_type .as_ref() - .is_none_or(|kind| *kind == pdu.kind) + .is_none_or(|kind| kind == pdu.kind()) }) .filter(|(_, pdu)| { filter_rel_type .as_ref() - .is_none_or(|rel_type| pdu.relation_type_equal(rel_type)) + .is_none_or(|rel_type| rel_type.relation_type_equal(pdu)) }) .stream() .ready_take_while(|(count, _)| Some(*count) != to) @@ -172,17 +172,17 @@ async fn paginate_relations_with_filter( }) } -async fn visibility_filter( +async fn visibility_filter( services: &Services, sender_user: &UserId, - item: PdusIterItem, -) -> Option { + item: (PduCount, Pdu), +) -> Option<(PduCount, Pdu)> { let (_, pdu) = &item; services .rooms .state_accessor - .user_can_see_event(sender_user, &pdu.room_id, &pdu.event_id) + .user_can_see_event(sender_user, pdu.room_id(), pdu.event_id()) .await .then_some(item) } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 1019b358..052329d1 100644 --- 
a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -260,5 +260,6 @@ async fn delay_response() { "Got successful /report request, waiting {time_to_wait} seconds before sending \ successful response." ); + sleep(Duration::from_secs(time_to_wait)).await; } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index c2f59d4c..2aca5b9d 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -49,7 +49,9 @@ pub(crate) async fn room_initial_sync_route( .try_collect::>(); let (membership, visibility, state, events) = - try_join4(membership, visibility, state, events).await?; + try_join4(membership, visibility, state, events) + .boxed() + .await?; let messages = PaginationChunk { start: events.last().map(at!(0)).as_ref().map(ToString::to_string), diff --git a/src/api/client/room/summary.rs b/src/api/client/room/summary.rs index ab534765..635f5a8a 100644 --- a/src/api/client/room/summary.rs +++ b/src/api/client/room/summary.rs @@ -112,13 +112,15 @@ async fn local_room_summary_response( ) -> Result { trace!(?sender_user, "Sending local room summary response for {room_id:?}"); let join_rule = services.rooms.state_accessor.get_join_rules(room_id); + let world_readable = services.rooms.state_accessor.is_world_readable(room_id); + let guest_can_join = services.rooms.state_accessor.guest_can_join(room_id); let (join_rule, world_readable, guest_can_join) = join3(join_rule, world_readable, guest_can_join).await; - trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); + trace!("{join_rule:?}, {world_readable:?}, {guest_can_join:?}"); user_can_see_summary( services, room_id, diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index d8f5ea83..ae632235 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -2,7 +2,7 @@ use std::cmp::max; use axum::extract::State; use conduwuit::{ - Err, Error, Result, err, info, + Err, Error, Event, Result, 
err, info, matrix::{StateKey, pdu::PduBuilder}, }; use futures::StreamExt; @@ -215,7 +215,7 @@ pub(crate) async fn upgrade_room_route( .room_state_get(&body.room_id, event_type, "") .await { - | Ok(v) => v.content.clone(), + | Ok(v) => v.content().to_owned(), | Err(_) => continue, // Skipping missing events. }; diff --git a/src/api/client/sync/v4.rs b/src/api/client/sync/v4.rs index cabd67e4..14cd50d8 100644 --- a/src/api/client/sync/v4.rs +++ b/src/api/client/sync/v4.rs @@ -6,7 +6,7 @@ use std::{ use axum::extract::State; use conduwuit::{ - Err, Error, Event, PduCount, PduEvent, Result, at, debug, error, extract_variant, + Err, Error, Event, PduCount, Result, at, debug, error, extract_variant, matrix::TypeStateKey, utils::{ BoolExt, IterStream, ReadyExt, TryFutureExtExt, @@ -627,7 +627,7 @@ pub(crate) async fn sync_events_v4_route( .state_accessor .room_state_get(room_id, &state.0, &state.1) .await - .map(PduEvent::into_format) + .map(Event::into_format) .ok() }) .collect() diff --git a/src/api/server/invite.rs b/src/api/server/invite.rs index 0d26d787..0a9b2e10 100644 --- a/src/api/server/invite.rs +++ b/src/api/server/invite.rs @@ -2,8 +2,10 @@ use axum::extract::State; use axum_client_ip::InsecureClientIp; use base64::{Engine as _, engine::general_purpose}; use conduwuit::{ - Err, Error, PduEvent, Result, err, matrix::Event, pdu::gen_event_id, utils, - utils::hash::sha256, warn, + Err, Error, PduEvent, Result, err, + matrix::{Event, event::gen_event_id}, + utils::{self, hash::sha256}, + warn, }; use ruma::{ CanonicalJsonValue, OwnedUserId, UserId, diff --git a/src/api/server/send_join.rs b/src/api/server/send_join.rs index 895eca81..652451c7 100644 --- a/src/api/server/send_join.rs +++ b/src/api/server/send_join.rs @@ -5,7 +5,7 @@ use std::borrow::Borrow; use axum::extract::State; use conduwuit::{ Err, Result, at, err, - pdu::gen_event_id_canonical_json, + matrix::event::gen_event_id_canonical_json, utils::stream::{IterStream, TryBroadbandExt}, warn, }; diff 
--git a/src/api/server/send_knock.rs b/src/api/server/send_knock.rs index 8d3697d2..ffd41ada 100644 --- a/src/api/server/send_knock.rs +++ b/src/api/server/send_knock.rs @@ -1,7 +1,7 @@ use axum::extract::State; use conduwuit::{ Err, Result, err, - matrix::pdu::{PduEvent, gen_event_id_canonical_json}, + matrix::{event::gen_event_id_canonical_json, pdu::PduEvent}, warn, }; use futures::FutureExt; diff --git a/src/api/server/send_leave.rs b/src/api/server/send_leave.rs index d3dc994c..b6336e1a 100644 --- a/src/api/server/send_leave.rs +++ b/src/api/server/send_leave.rs @@ -1,7 +1,7 @@ #![allow(deprecated)] use axum::extract::State; -use conduwuit::{Err, Result, err, matrix::pdu::gen_event_id_canonical_json}; +use conduwuit::{Err, Result, err, matrix::event::gen_event_id_canonical_json}; use conduwuit_service::Services; use futures::FutureExt; use ruma::{ diff --git a/src/core/config/proxy.rs b/src/core/config/proxy.rs index ea388f24..77c4531a 100644 --- a/src/core/config/proxy.rs +++ b/src/core/config/proxy.rs @@ -88,10 +88,7 @@ impl PartialProxyConfig { } } match (included_because, excluded_because) { - | (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), /* included for - * a more specific - * reason */ - // than excluded + | (Some(a), Some(b)) if a.more_specific_than(b) => Some(&self.url), | (Some(_), None) => Some(&self.url), | _ => None, } diff --git a/src/core/info/cargo.rs b/src/core/info/cargo.rs index e70bdcd5..61a97508 100644 --- a/src/core/info/cargo.rs +++ b/src/core/info/cargo.rs @@ -84,10 +84,12 @@ fn append_features(features: &mut Vec, manifest: &str) -> Result<()> { fn init_dependencies() -> Result { let manifest = Manifest::from_str(WORKSPACE_MANIFEST)?; - Ok(manifest + let deps_set = manifest .workspace .as_ref() .expect("manifest has workspace section") .dependencies - .clone()) + .clone(); + + Ok(deps_set) } diff --git a/src/core/matrix/event.rs b/src/core/matrix/event.rs index 5b12770b..a1d1339e 100644 --- 
a/src/core/matrix/event.rs +++ b/src/core/matrix/event.rs @@ -1,21 +1,27 @@ mod content; +mod filter; mod format; +mod id; mod redact; +mod relation; mod type_ext; +mod unsigned; + +use std::fmt::Debug; use ruma::{ - EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, RoomVersionId, UserId, - events::TimelineEventType, + CanonicalJsonObject, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, + RoomVersionId, UserId, events::TimelineEventType, }; use serde::Deserialize; use serde_json::{Value as JsonValue, value::RawValue as RawJsonValue}; -pub use self::type_ext::TypeExt; -use super::state_key::StateKey; -use crate::Result; +pub use self::{filter::Matches, id::*, relation::RelationTypeEqual, type_ext::TypeExt}; +use super::{pdu::Pdu, state_key::StateKey}; +use crate::{Result, utils}; /// Abstraction of a PDU so users can have their own PDU types. -pub trait Event { +pub trait Event: Clone + Debug { /// Serialize into a Ruma JSON format, consuming. #[inline] fn into_format(self) -> T @@ -36,6 +42,41 @@ pub trait Event { format::Ref(self).into() } + #[inline] + fn contains_unsigned_property(&self, property: &str, is_type: T) -> bool + where + T: FnOnce(&JsonValue) -> bool, + Self: Sized, + { + unsigned::contains_unsigned_property::(self, property, is_type) + } + + #[inline] + fn get_unsigned_property(&self, property: &str) -> Result + where + T: for<'de> Deserialize<'de>, + Self: Sized, + { + unsigned::get_unsigned_property::(self, property) + } + + #[inline] + fn get_unsigned_as_value(&self) -> JsonValue + where + Self: Sized, + { + unsigned::get_unsigned_as_value(self) + } + + #[inline] + fn get_unsigned(&self) -> Result + where + T: for<'de> Deserialize<'de>, + Self: Sized, + { + unsigned::get_unsigned::(self) + } + #[inline] fn get_content_as_value(&self) -> JsonValue where @@ -69,6 +110,39 @@ pub trait Event { redact::is_redacted(self) } + #[inline] + fn into_canonical_object(self) -> CanonicalJsonObject + where + Self: Sized, + { + 
utils::to_canonical_object(self.into_pdu()).expect("failed to create Value::Object") + } + + #[inline] + fn to_canonical_object(&self) -> CanonicalJsonObject { + utils::to_canonical_object(self.as_pdu()).expect("failed to create Value::Object") + } + + #[inline] + fn into_value(self) -> JsonValue + where + Self: Sized, + { + serde_json::to_value(self.into_pdu()).expect("failed to create JSON Value") + } + + #[inline] + fn to_value(&self) -> JsonValue { + serde_json::to_value(self.as_pdu()).expect("failed to create JSON Value") + } + + #[inline] + fn as_mut_pdu(&mut self) -> &mut Pdu { unimplemented!("not a mutable Pdu") } + + fn as_pdu(&self) -> &Pdu; + + fn into_pdu(self) -> Pdu; + fn is_owned(&self) -> bool; // @@ -76,7 +150,7 @@ pub trait Event { // /// All the authenticating events for this event. - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn auth_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_; /// The event's content. fn content(&self) -> &RawJsonValue; @@ -88,7 +162,7 @@ pub trait Event { fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch; /// The events before this event. - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_; + fn prev_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_; /// If this event is a redaction event this is the event it redacts. 
fn redacts(&self) -> Option<&EventId>; diff --git a/src/core/matrix/event/filter.rs b/src/core/matrix/event/filter.rs new file mode 100644 index 00000000..d3a225b6 --- /dev/null +++ b/src/core/matrix/event/filter.rs @@ -0,0 +1,93 @@ +use ruma::api::client::filter::{RoomEventFilter, UrlFilter}; +use serde_json::Value; + +use super::Event; +use crate::is_equal_to; + +pub trait Matches { + fn matches(&self, event: &E) -> bool; +} + +impl Matches for &RoomEventFilter { + #[inline] + fn matches(&self, event: &E) -> bool { + if !matches_sender(event, self) { + return false; + } + + if !matches_room(event, self) { + return false; + } + + if !matches_type(event, self) { + return false; + } + + if !matches_url(event, self) { + return false; + } + + true + } +} + +fn matches_room(event: &E, filter: &RoomEventFilter) -> bool { + if filter.not_rooms.iter().any(is_equal_to!(event.room_id())) { + return false; + } + + if let Some(rooms) = filter.rooms.as_ref() { + if !rooms.iter().any(is_equal_to!(event.room_id())) { + return false; + } + } + + true +} + +fn matches_sender(event: &E, filter: &RoomEventFilter) -> bool { + if filter.not_senders.iter().any(is_equal_to!(event.sender())) { + return false; + } + + if let Some(senders) = filter.senders.as_ref() { + if !senders.iter().any(is_equal_to!(event.sender())) { + return false; + } + } + + true +} + +fn matches_type(event: &E, filter: &RoomEventFilter) -> bool { + let kind = event.kind().to_cow_str(); + + if filter.not_types.iter().any(is_equal_to!(&kind)) { + return false; + } + + if let Some(types) = filter.types.as_ref() { + if !types.iter().any(is_equal_to!(&kind)) { + return false; + } + } + + true +} + +fn matches_url(event: &E, filter: &RoomEventFilter) -> bool { + let Some(url_filter) = filter.url_filter.as_ref() else { + return true; + }; + + //TODO: might be better to use Ruma's Raw rather than serde here + let url = event + .get_content_as_value() + .get("url") + .is_some_and(Value::is_string); + + match url_filter { 
+ | UrlFilter::EventsWithUrl => url, + | UrlFilter::EventsWithoutUrl => !url, + } +} diff --git a/src/core/matrix/pdu/event_id.rs b/src/core/matrix/event/id.rs similarity index 100% rename from src/core/matrix/pdu/event_id.rs rename to src/core/matrix/event/id.rs diff --git a/src/core/matrix/event/relation.rs b/src/core/matrix/event/relation.rs new file mode 100644 index 00000000..58324e86 --- /dev/null +++ b/src/core/matrix/event/relation.rs @@ -0,0 +1,28 @@ +use ruma::events::relation::RelationType; +use serde::Deserialize; + +use super::Event; + +pub trait RelationTypeEqual { + fn relation_type_equal(&self, event: &E) -> bool; +} + +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelatesToEventId { + #[serde(rename = "m.relates_to")] + relates_to: ExtractRelType, +} + +#[derive(Clone, Debug, Deserialize)] +struct ExtractRelType { + rel_type: RelationType, +} + +impl RelationTypeEqual for RelationType { + fn relation_type_equal(&self, event: &E) -> bool { + event + .get_content() + .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) + .is_ok_and(|r| r == *self) + } +} diff --git a/src/core/matrix/event/unsigned.rs b/src/core/matrix/event/unsigned.rs new file mode 100644 index 00000000..42928af4 --- /dev/null +++ b/src/core/matrix/event/unsigned.rs @@ -0,0 +1,51 @@ +use serde::Deserialize; +use serde_json::value::Value as JsonValue; + +use super::Event; +use crate::{Result, err, is_true}; + +pub(super) fn contains_unsigned_property(event: &E, property: &str, is_type: F) -> bool +where + F: FnOnce(&JsonValue) -> bool, + E: Event, +{ + get_unsigned_as_value(event) + .get(property) + .map(is_type) + .is_some_and(is_true!()) +} + +pub(super) fn get_unsigned_property(event: &E, property: &str) -> Result +where + T: for<'de> Deserialize<'de>, + E: Event, +{ + get_unsigned_as_value(event) + .get_mut(property) + .map(JsonValue::take) + .map(serde_json::from_value) + .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? 
+ .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) +} + +#[must_use] +pub(super) fn get_unsigned_as_value(event: &E) -> JsonValue +where + E: Event, +{ + get_unsigned::(event).unwrap_or_default() +} + +pub(super) fn get_unsigned(event: &E) -> Result +where + T: for<'de> Deserialize<'de>, + E: Event, +{ + event + .unsigned() + .as_ref() + .map(|raw| raw.get()) + .map(serde_json::from_str) + .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? + .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) +} diff --git a/src/core/matrix/pdu.rs b/src/core/matrix/pdu.rs index e64baeb8..bff0c203 100644 --- a/src/core/matrix/pdu.rs +++ b/src/core/matrix/pdu.rs @@ -1,12 +1,8 @@ mod builder; -mod content; mod count; -mod event_id; -mod filter; mod id; mod raw_id; mod redact; -mod relation; #[cfg(test)] mod tests; mod unsigned; @@ -24,7 +20,6 @@ pub use self::{ Count as PduCount, Id as PduId, Pdu as PduEvent, RawId as RawPduId, builder::{Builder, Builder as PduBuilder}, count::Count, - event_id::*, id::{ShortId, *}, raw_id::*, }; @@ -91,7 +86,7 @@ impl Pdu { impl Event for Pdu { #[inline] - fn auth_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn auth_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_ { self.auth_events.iter().map(AsRef::as_ref) } @@ -107,7 +102,7 @@ impl Event for Pdu { } #[inline] - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn prev_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_ { self.prev_events.iter().map(AsRef::as_ref) } @@ -129,13 +124,22 @@ impl Event for Pdu { #[inline] fn unsigned(&self) -> Option<&RawJsonValue> { self.unsigned.as_deref() } + #[inline] + fn as_mut_pdu(&mut self) -> &mut Pdu { self } + + #[inline] + fn as_pdu(&self) -> &Pdu { self } + + #[inline] + fn into_pdu(self) -> Pdu { self } + #[inline] fn is_owned(&self) -> bool { true } } impl Event for &Pdu { #[inline] - fn auth_events(&self) -> 
impl DoubleEndedIterator + Send + '_ { + fn auth_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_ { self.auth_events.iter().map(AsRef::as_ref) } @@ -151,7 +155,7 @@ impl Event for &Pdu { } #[inline] - fn prev_events(&self) -> impl DoubleEndedIterator + Send + '_ { + fn prev_events(&self) -> impl DoubleEndedIterator + Clone + Send + '_ { self.prev_events.iter().map(AsRef::as_ref) } @@ -173,6 +177,12 @@ impl Event for &Pdu { #[inline] fn unsigned(&self) -> Option<&RawJsonValue> { self.unsigned.as_deref() } + #[inline] + fn as_pdu(&self) -> &Pdu { self } + + #[inline] + fn into_pdu(self) -> Pdu { self.clone() } + #[inline] fn is_owned(&self) -> bool { false } } diff --git a/src/core/matrix/pdu/content.rs b/src/core/matrix/pdu/content.rs deleted file mode 100644 index 4e60ce6e..00000000 --- a/src/core/matrix/pdu/content.rs +++ /dev/null @@ -1,20 +0,0 @@ -use serde::Deserialize; -use serde_json::value::Value as JsonValue; - -use crate::{Result, err, implement}; - -#[must_use] -#[implement(super::Pdu)] -pub fn get_content_as_value(&self) -> JsonValue { - self.get_content() - .expect("pdu content must be a valid JSON value") -} - -#[implement(super::Pdu)] -pub fn get_content(&self) -> Result -where - T: for<'de> Deserialize<'de>, -{ - serde_json::from_str(self.content.get()) - .map_err(|e| err!(Database("Failed to deserialize pdu content into type: {e}"))) -} diff --git a/src/core/matrix/pdu/filter.rs b/src/core/matrix/pdu/filter.rs deleted file mode 100644 index aabf13db..00000000 --- a/src/core/matrix/pdu/filter.rs +++ /dev/null @@ -1,90 +0,0 @@ -use ruma::api::client::filter::{RoomEventFilter, UrlFilter}; -use serde_json::Value; - -use crate::{implement, is_equal_to}; - -#[implement(super::Pdu)] -#[must_use] -pub fn matches(&self, filter: &RoomEventFilter) -> bool { - if !self.matches_sender(filter) { - return false; - } - - if !self.matches_room(filter) { - return false; - } - - if !self.matches_type(filter) { - return false; - } - - if 
!self.matches_url(filter) { - return false; - } - - true -} - -#[implement(super::Pdu)] -fn matches_room(&self, filter: &RoomEventFilter) -> bool { - if filter.not_rooms.contains(&self.room_id) { - return false; - } - - if let Some(rooms) = filter.rooms.as_ref() { - if !rooms.contains(&self.room_id) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_sender(&self, filter: &RoomEventFilter) -> bool { - if filter.not_senders.contains(&self.sender) { - return false; - } - - if let Some(senders) = filter.senders.as_ref() { - if !senders.contains(&self.sender) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_type(&self, filter: &RoomEventFilter) -> bool { - let event_type = &self.kind.to_cow_str(); - if filter.not_types.iter().any(is_equal_to!(event_type)) { - return false; - } - - if let Some(types) = filter.types.as_ref() { - if !types.iter().any(is_equal_to!(event_type)) { - return false; - } - } - - true -} - -#[implement(super::Pdu)] -fn matches_url(&self, filter: &RoomEventFilter) -> bool { - let Some(url_filter) = filter.url_filter.as_ref() else { - return true; - }; - - //TODO: might be better to use Ruma's Raw rather than serde here - let url = serde_json::from_str::(self.content.get()) - .expect("parsing content JSON failed") - .get("url") - .is_some_and(Value::is_string); - - match url_filter { - | UrlFilter::EventsWithUrl => url, - | UrlFilter::EventsWithoutUrl => !url, - } -} diff --git a/src/core/matrix/pdu/redact.rs b/src/core/matrix/pdu/redact.rs index e6a03209..896e03f8 100644 --- a/src/core/matrix/pdu/redact.rs +++ b/src/core/matrix/pdu/redact.rs @@ -1,10 +1,10 @@ use ruma::{RoomVersionId, canonical_json::redact_content_in_place}; -use serde_json::{json, value::to_raw_value}; +use serde_json::{Value as JsonValue, json, value::to_raw_value}; use crate::{Error, Result, err, implement}; #[implement(super::Pdu)] -pub fn redact(&mut self, room_version_id: &RoomVersionId, reason: &Self) -> Result { +pub 
fn redact(&mut self, room_version_id: &RoomVersionId, reason: JsonValue) -> Result { self.unsigned = None; let mut content = serde_json::from_str(self.content.get()) diff --git a/src/core/matrix/pdu/relation.rs b/src/core/matrix/pdu/relation.rs deleted file mode 100644 index 2968171e..00000000 --- a/src/core/matrix/pdu/relation.rs +++ /dev/null @@ -1,22 +0,0 @@ -use ruma::events::relation::RelationType; -use serde::Deserialize; - -use crate::implement; - -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelType { - rel_type: RelationType, -} -#[derive(Clone, Debug, Deserialize)] -struct ExtractRelatesToEventId { - #[serde(rename = "m.relates_to")] - relates_to: ExtractRelType, -} - -#[implement(super::Pdu)] -#[must_use] -pub fn relation_type_equal(&self, rel_type: &RelationType) -> bool { - self.get_content() - .map(|c: ExtractRelatesToEventId| c.relates_to.rel_type) - .is_ok_and(|r| r == *rel_type) -} diff --git a/src/core/matrix/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs index 23897519..0c58bb68 100644 --- a/src/core/matrix/pdu/unsigned.rs +++ b/src/core/matrix/pdu/unsigned.rs @@ -1,11 +1,10 @@ use std::collections::BTreeMap; use ruma::MilliSecondsSinceUnixEpoch; -use serde::Deserialize; use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{Result, err, implement, is_true}; +use crate::{Result, err, implement}; #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { @@ -74,43 +73,3 @@ pub fn add_relation(&mut self, name: &str, pdu: Option<&Pdu>) -> Result { Ok(()) } - -#[implement(Pdu)] -pub fn contains_unsigned_property(&self, property: &str, is_type: F) -> bool -where - F: FnOnce(&JsonValue) -> bool, -{ - self.get_unsigned_as_value() - .get(property) - .map(is_type) - .is_some_and(is_true!()) -} - -#[implement(Pdu)] -pub fn get_unsigned_property(&self, property: &str) -> Result -where - T: for<'de> Deserialize<'de>, -{ - self.get_unsigned_as_value() - .get_mut(property) - 
.map(JsonValue::take) - .map(serde_json::from_value) - .ok_or(err!(Request(NotFound("property not found in unsigned object"))))? - .map_err(|e| err!(Database("Failed to deserialize unsigned.{property} into type: {e}"))) -} - -#[implement(Pdu)] -#[must_use] -pub fn get_unsigned_as_value(&self) -> JsonValue { - self.get_unsigned::().unwrap_or_default() -} - -#[implement(Pdu)] -pub fn get_unsigned(&self) -> Result { - self.unsigned - .as_ref() - .map(|raw| raw.get()) - .map(serde_json::from_str) - .ok_or(err!(Request(NotFound("\"unsigned\" property not found in pdu"))))? - .map_err(|e| err!(Database("Failed to deserialize \"unsigned\" into value: {e}"))) -} diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index ed5aa034..ce9d9276 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -74,7 +74,7 @@ type Result = crate::Result; /// event is part of the same room. //#[tracing::instrument(level = "debug", skip(state_sets, auth_chain_sets, //#[tracing::instrument(level event_fetch))] -pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( +pub async fn resolve<'a, Pdu, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, ExistsFut>( room_version: &RoomVersionId, state_sets: Sets, auth_chain_sets: &'a [HashSet], @@ -83,14 +83,14 @@ pub async fn resolve<'a, E, Sets, SetIter, Hasher, Fetch, FetchFut, Exists, Exis ) -> Result> where Fetch: Fn(OwnedEventId) -> FetchFut + Sync, - FetchFut: Future> + Send, + FetchFut: Future> + Send, Exists: Fn(OwnedEventId) -> ExistsFut + Sync, ExistsFut: Future + Send, Sets: IntoIterator + Send, SetIter: Iterator> + Clone + Send, Hasher: BuildHasher + Send + Sync, - E: Event + Clone + Send + Sync, - for<'b> &'b E: Event + Send, + Pdu: Event + Clone + Send + Sync, + for<'b> &'b Pdu: Event + Send, { debug!("State resolution starting"); @@ -221,6 +221,7 @@ where let state_sets_iter = state_sets_iter.inspect(|_| state_set_count = 
state_set_count.saturating_add(1)); + for (k, v) in state_sets_iter.flatten() { occurrences .entry(k) @@ -305,6 +306,7 @@ where let pl = get_power_level_for_sender(&event_id, fetch_event) .await .ok()?; + Some((event_id, pl)) }) .inspect(|(event_id, pl)| { diff --git a/src/core/mods/module.rs b/src/core/mods/module.rs index b65bbca2..bcadf5aa 100644 --- a/src/core/mods/module.rs +++ b/src/core/mods/module.rs @@ -44,6 +44,7 @@ impl Module { .handle .as_ref() .expect("backing library loaded by this instance"); + // SAFETY: Calls dlsym(3) on unix platforms. This might not have to be unsafe // if wrapped in libloading with_dlerror(). let sym = unsafe { handle.get::(cname.as_bytes()) }; diff --git a/src/core/mods/path.rs b/src/core/mods/path.rs index cde251b3..b792890b 100644 --- a/src/core/mods/path.rs +++ b/src/core/mods/path.rs @@ -27,6 +27,7 @@ pub fn to_name(path: &OsStr) -> Result { .expect("path file stem") .to_str() .expect("name string"); + let name = name.strip_prefix("lib").unwrap_or(name).to_owned(); Ok(name) diff --git a/src/core/utils/html.rs b/src/core/utils/html.rs index f2b6d861..eac4c47f 100644 --- a/src/core/utils/html.rs +++ b/src/core/utils/html.rs @@ -23,8 +23,10 @@ impl fmt::Display for Escape<'_> { | '"' => """, | _ => continue, }; + fmt.write_str(&pile_o_bits[last..i])?; fmt.write_str(s)?; + // NOTE: we only expect single byte characters here - which is fine as long as // we only match single byte characters last = i.saturating_add(1); diff --git a/src/core/utils/json.rs b/src/core/utils/json.rs index 3f2f225e..df4ccd13 100644 --- a/src/core/utils/json.rs +++ b/src/core/utils/json.rs @@ -1,4 +1,4 @@ -use std::{fmt, str::FromStr}; +use std::{fmt, marker::PhantomData, str::FromStr}; use ruma::{CanonicalJsonError, CanonicalJsonObject, canonical_json::try_from_json_map}; @@ -11,25 +11,28 @@ use crate::Result; pub fn to_canonical_object( value: T, ) -> Result { + use CanonicalJsonError::SerDe; use serde::ser::Error; - match 
serde_json::to_value(value).map_err(CanonicalJsonError::SerDe)? { + match serde_json::to_value(value).map_err(SerDe)? { | serde_json::Value::Object(map) => try_from_json_map(map), - | _ => - Err(CanonicalJsonError::SerDe(serde_json::Error::custom("Value must be an object"))), + | _ => Err(SerDe(serde_json::Error::custom("Value must be an object"))), } } -pub fn deserialize_from_str< - 'de, +pub fn deserialize_from_str<'de, D, T, E>(deserializer: D) -> Result +where D: serde::de::Deserializer<'de>, T: FromStr, E: fmt::Display, ->( - deserializer: D, -) -> Result { - struct Visitor, E>(std::marker::PhantomData); - impl, Err: fmt::Display> serde::de::Visitor<'_> for Visitor { +{ + struct Visitor, E>(PhantomData); + + impl serde::de::Visitor<'_> for Visitor + where + T: FromStr, + Err: fmt::Display, + { type Value = T; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -43,5 +46,6 @@ pub fn deserialize_from_str< v.parse().map_err(serde::de::Error::custom) } } - deserializer.deserialize_str(Visitor(std::marker::PhantomData)) + + deserializer.deserialize_str(Visitor(PhantomData)) } diff --git a/src/core/utils/time.rs b/src/core/utils/time.rs index 73f73971..394e08cb 100644 --- a/src/core/utils/time.rs +++ b/src/core/utils/time.rs @@ -105,14 +105,11 @@ pub fn whole_unit(d: Duration) -> Unit { | 86_400.. => Days(d.as_secs() / 86_400), | 3_600..=86_399 => Hours(d.as_secs() / 3_600), | 60..=3_599 => Mins(d.as_secs() / 60), - | _ => match d.as_micros() { | 1_000_000.. => Secs(d.as_secs()), | 1_000..=999_999 => Millis(d.subsec_millis().into()), - | _ => match d.as_nanos() { | 1_000.. 
=> Micros(d.subsec_micros().into()), - | _ => Nanos(d.subsec_nanos().into()), }, }, diff --git a/src/database/watchers.rs b/src/database/watchers.rs index b3907833..efb939d7 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -37,7 +37,6 @@ impl Watchers { pub(crate) fn wake(&self, key: &[u8]) { let watchers = self.watchers.read().unwrap(); let mut triggered = Vec::new(); - for length in 0..=key.len() { if watchers.contains_key(&key[..length]) { triggered.push(&key[..length]); diff --git a/src/main/logging.rs b/src/main/logging.rs index aec50bd4..36a8896c 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -22,10 +22,12 @@ pub(crate) fn init( let reload_handles = LogLevelReloadHandles::default(); let console_span_events = fmt_span::from_str(&config.log_span_events).unwrap_or_err(); + let console_filter = EnvFilter::builder() .with_regex(config.log_filter_regex) .parse(&config.log) .map_err(|e| err!(Config("log", "{e}.")))?; + let console_layer = fmt::Layer::new() .with_span_events(console_span_events) .event_format(ConsoleFormat::new(config)) @@ -34,6 +36,7 @@ pub(crate) fn init( let (console_reload_filter, console_reload_handle) = reload::Layer::new(console_filter.clone()); + reload_handles.add("console", Box::new(console_reload_handle)); let cap_state = Arc::new(capture::State::new()); @@ -47,8 +50,10 @@ pub(crate) fn init( let subscriber = { let sentry_filter = EnvFilter::try_new(&config.sentry_filter) .map_err(|e| err!(Config("sentry_filter", "{e}.")))?; + let sentry_layer = sentry_tracing::layer(); let (sentry_reload_filter, sentry_reload_handle) = reload::Layer::new(sentry_filter); + reload_handles.add("sentry", Box::new(sentry_reload_handle)); subscriber.with(sentry_layer.with_filter(sentry_reload_filter)) }; @@ -58,12 +63,15 @@ pub(crate) fn init( let (flame_layer, flame_guard) = if config.tracing_flame { let flame_filter = EnvFilter::try_new(&config.tracing_flame_filter) .map_err(|e| err!(Config("tracing_flame_filter", 
"{e}.")))?; + let (flame_layer, flame_guard) = tracing_flame::FlameLayer::with_file(&config.tracing_flame_output_path) .map_err(|e| err!(Config("tracing_flame_output_path", "{e}.")))?; + let flame_layer = flame_layer .with_empty_samples(false) .with_filter(flame_filter); + (Some(flame_layer), Some(flame_guard)) } else { (None, None) @@ -71,19 +79,24 @@ pub(crate) fn init( let jaeger_filter = EnvFilter::try_new(&config.jaeger_filter) .map_err(|e| err!(Config("jaeger_filter", "{e}.")))?; + let jaeger_layer = config.allow_jaeger.then(|| { opentelemetry::global::set_text_map_propagator( opentelemetry_jaeger::Propagator::new(), ); + let tracer = opentelemetry_jaeger::new_agent_pipeline() .with_auto_split_batch(true) .with_service_name(conduwuit_core::name()) .install_batch(opentelemetry_sdk::runtime::Tokio) .expect("jaeger agent pipeline"); + let telemetry = tracing_opentelemetry::layer().with_tracer(tracer); + let (jaeger_reload_filter, jaeger_reload_handle) = reload::Layer::new(jaeger_filter.clone()); reload_handles.add("jaeger", Box::new(jaeger_reload_handle)); + Some(telemetry.with_filter(jaeger_reload_filter)) }); diff --git a/src/main/mods.rs b/src/main/mods.rs index d585a381..6140cc6e 100644 --- a/src/main/mods.rs +++ b/src/main/mods.rs @@ -51,7 +51,9 @@ pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, boo }, }; } + server.server.stopping.store(false, Ordering::Release); + let run = main_mod.get::("run")?; if let Err(error) = run(server .services @@ -64,7 +66,9 @@ pub(crate) async fn run(server: &Arc, starts: bool) -> Result<(bool, boo error!("Running server: {error}"); return Err(error); } + let reloads = server.server.reloading.swap(false, Ordering::AcqRel); + let stops = !reloads || stale(server).await? 
<= restart_thresh(); let starts = reloads && stops; if stops { diff --git a/src/main/sentry.rs b/src/main/sentry.rs index 68f12eb7..2a09f415 100644 --- a/src/main/sentry.rs +++ b/src/main/sentry.rs @@ -35,11 +35,13 @@ fn options(config: &Config) -> ClientOptions { .expect("init_sentry should only be called if sentry is enabled and this is not None") .as_str(); + let server_name = config + .sentry_send_server_name + .then(|| config.server_name.to_string().into()); + ClientOptions { dsn: Some(Dsn::from_str(dsn).expect("sentry_endpoint must be a valid URL")), - server_name: config - .sentry_send_server_name - .then(|| config.server_name.to_string().into()), + server_name, traces_sample_rate: config.sentry_traces_sample_rate, debug: cfg!(debug_assertions), release: sentry::release_name!(), diff --git a/src/router/request.rs b/src/router/request.rs index dba90324..3bbeae03 100644 --- a/src/router/request.rs +++ b/src/router/request.rs @@ -98,8 +98,8 @@ async fn execute( fn handle_result(method: &Method, uri: &Uri, result: Response) -> Result { let status = result.status(); - let reason = status.canonical_reason().unwrap_or("Unknown Reason"); let code = status.as_u16(); + let reason = status.canonical_reason().unwrap_or("Unknown Reason"); if status.is_server_error() { error!(method = ?method, uri = ?uri, "{code} {reason}"); diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 66c373ec..d971ce95 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -305,13 +305,13 @@ impl Service { return Ok(()); }; - let response_sender = if self.is_admin_room(&pdu.room_id).await { + let response_sender = if self.is_admin_room(pdu.room_id()).await { &self.services.globals.server_user } else { - &pdu.sender + pdu.sender() }; - self.respond_to_room(content, &pdu.room_id, response_sender) + self.respond_to_room(content, pdu.room_id(), response_sender) .boxed() .await } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 
192ef447..baa7a72e 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -293,11 +293,7 @@ impl Service { .state_accessor .room_state_get(event.room_id(), &StateEventType::RoomPowerLevels, "") .await - .and_then(|ev| { - serde_json::from_str(ev.content.get()).map_err(|e| { - err!(Database(error!("invalid m.room.power_levels event: {e:?}"))) - }) - }) + .and_then(|event| event.get_content()) .unwrap_or_default(); let serialized = event.to_format(); diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 866e45a9..7675efd4 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,7 +3,7 @@ mod remote; use std::sync::Arc; use conduwuit::{ - Err, Result, Server, err, + Err, Event, Result, Server, err, utils::{ReadyExt, stream::TryIgnore}, }; use database::{Deserialized, Ignore, Interfix, Map}; @@ -241,7 +241,7 @@ impl Service { .room_state_get(&room_id, &StateEventType::RoomCreate, "") .await { - return Ok(event.sender == user_id); + return Ok(event.sender() == user_id); } Err!(Database("Room has no m.room.create event")) diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index b0a7d827..44027e04 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -4,11 +4,13 @@ use std::{ }; use conduwuit::{ - PduEvent, debug, debug_error, debug_warn, implement, pdu, trace, - utils::continue_exponential_backoff_secs, warn, + Event, PduEvent, debug, debug_error, debug_warn, implement, + matrix::event::gen_event_id_canonical_json, trace, utils::continue_exponential_backoff_secs, + warn, }; use ruma::{ - CanonicalJsonValue, OwnedEventId, RoomId, ServerName, api::federation::event::get_event, + CanonicalJsonValue, EventId, OwnedEventId, RoomId, ServerName, + api::federation::event::get_event, }; use super::get_room_version_id; @@ -23,13 
+25,17 @@ use super::get_room_version_id; /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? #[implement(super::Service)] -pub(super) async fn fetch_and_handle_outliers<'a>( +pub(super) async fn fetch_and_handle_outliers<'a, Pdu, Events>( &self, origin: &'a ServerName, - events: &'a [OwnedEventId], - create_event: &'a PduEvent, + events: Events, + create_event: &'a Pdu, room_id: &'a RoomId, -) -> Vec<(PduEvent, Option>)> { +) -> Vec<(PduEvent, Option>)> +where + Pdu: Event + Send + Sync, + Events: Iterator + Clone + Send, +{ let back_off = |id| match self .services .globals @@ -46,22 +52,23 @@ pub(super) async fn fetch_and_handle_outliers<'a>( }, }; - let mut events_with_auth_events = Vec::with_capacity(events.len()); + let mut events_with_auth_events = Vec::with_capacity(events.clone().count()); + for id in events { // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) if let Ok(local_pdu) = self.services.timeline.get_pdu(id).await { - trace!("Found {id} in db"); - events_with_auth_events.push((id, Some(local_pdu), vec![])); + events_with_auth_events.push((id.to_owned(), Some(local_pdu), vec![])); continue; } // c. Ask origin server over federation // We also handle its auth chain here so we don't get a stack overflow in // handle_outlier_pdu. 
- let mut todo_auth_events: VecDeque<_> = [id.clone()].into(); + let mut todo_auth_events: VecDeque<_> = [id.to_owned()].into(); let mut events_in_reverse_order = Vec::with_capacity(todo_auth_events.len()); + let mut events_all = HashSet::with_capacity(todo_auth_events.len()); while let Some(next_id) = todo_auth_events.pop_front() { if let Some((time, tries)) = self @@ -117,7 +124,7 @@ pub(super) async fn fetch_and_handle_outliers<'a>( }; let Ok((calculated_event_id, value)) = - pdu::gen_event_id_canonical_json(&res.pdu, &room_version_id) + gen_event_id_canonical_json(&res.pdu, &room_version_id) else { back_off((*next_id).to_owned()); continue; @@ -160,7 +167,8 @@ pub(super) async fn fetch_and_handle_outliers<'a>( }, } } - events_with_auth_events.push((id, None, events_in_reverse_order)); + + events_with_auth_events.push((id.to_owned(), None, events_in_reverse_order)); } let mut pdus = Vec::with_capacity(events_with_auth_events.len()); @@ -217,5 +225,6 @@ pub(super) async fn fetch_and_handle_outliers<'a>( } } } + pdus } diff --git a/src/service/rooms/event_handler/fetch_prev.rs b/src/service/rooms/event_handler/fetch_prev.rs index 0f92d6e6..efc7a434 100644 --- a/src/service/rooms/event_handler/fetch_prev.rs +++ b/src/service/rooms/event_handler/fetch_prev.rs @@ -1,13 +1,16 @@ -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; +use std::{ + collections::{BTreeMap, HashMap, HashSet, VecDeque}, + iter::once, +}; use conduwuit::{ - PduEvent, Result, debug_warn, err, implement, + Event, PduEvent, Result, debug_warn, err, implement, state_res::{self}, }; use futures::{FutureExt, future}; use ruma::{ - CanonicalJsonValue, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, UInt, int, - uint, + CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, RoomId, ServerName, + int, uint, }; use super::check_room_id; @@ -19,20 +22,26 @@ use super::check_room_id; fields(%origin), )] #[allow(clippy::type_complexity)] -pub(super) async fn 
fetch_prev( +pub(super) async fn fetch_prev<'a, Pdu, Events>( &self, origin: &ServerName, - create_event: &PduEvent, + create_event: &Pdu, room_id: &RoomId, - first_ts_in_room: UInt, - initial_set: Vec, + first_ts_in_room: MilliSecondsSinceUnixEpoch, + initial_set: Events, ) -> Result<( Vec, HashMap)>, -)> { - let mut graph: HashMap = HashMap::with_capacity(initial_set.len()); +)> +where + Pdu: Event + Send + Sync, + Events: Iterator + Clone + Send, +{ + let num_ids = initial_set.clone().count(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: VecDeque = initial_set.into(); + let mut graph: HashMap = HashMap::with_capacity(num_ids); + let mut todo_outlier_stack: VecDeque = + initial_set.map(ToOwned::to_owned).collect(); let mut amount = 0; @@ -40,7 +49,12 @@ pub(super) async fn fetch_prev( self.services.server.check_running()?; match self - .fetch_and_handle_outliers(origin, &[prev_event_id.clone()], create_event, room_id) + .fetch_and_handle_outliers( + origin, + once(prev_event_id.as_ref()), + create_event, + room_id, + ) .boxed() .await .pop() @@ -65,17 +79,17 @@ pub(super) async fn fetch_prev( } if let Some(json) = json_opt { - if pdu.origin_server_ts > first_ts_in_room { + if pdu.origin_server_ts() > first_ts_in_room { amount = amount.saturating_add(1); - for prev_prev in &pdu.prev_events { + for prev_prev in pdu.prev_events() { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push_back(prev_prev.clone()); + todo_outlier_stack.push_back(prev_prev.to_owned()); } } graph.insert( prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), + pdu.prev_events().map(ToOwned::to_owned).collect(), ); } else { // Time based check failed @@ -98,8 +112,7 @@ pub(super) async fn fetch_prev( let event_fetch = |event_id| { let origin_server_ts = eventid_info .get(&event_id) - .cloned() - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts); + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts().get()); // This return value is 
the key used for sorting events, // events are then sorted by power level, time, diff --git a/src/service/rooms/event_handler/fetch_state.rs b/src/service/rooms/event_handler/fetch_state.rs index 0f9e093b..d68a3542 100644 --- a/src/service/rooms/event_handler/fetch_state.rs +++ b/src/service/rooms/event_handler/fetch_state.rs @@ -1,6 +1,6 @@ use std::collections::{HashMap, hash_map}; -use conduwuit::{Err, Error, PduEvent, Result, debug, debug_warn, implement}; +use conduwuit::{Err, Event, Result, debug, debug_warn, err, implement}; use futures::FutureExt; use ruma::{ EventId, OwnedEventId, RoomId, ServerName, api::federation::event::get_room_state_ids, @@ -18,13 +18,16 @@ use crate::rooms::short::ShortStateKey; skip_all, fields(%origin), )] -pub(super) async fn fetch_state( +pub(super) async fn fetch_state( &self, origin: &ServerName, - create_event: &PduEvent, + create_event: &Pdu, room_id: &RoomId, event_id: &EventId, -) -> Result>> { +) -> Result>> +where + Pdu: Event + Send + Sync, +{ let res = self .services .sending @@ -36,27 +39,27 @@ pub(super) async fn fetch_state( .inspect_err(|e| debug_warn!("Fetching state for event failed: {e}"))?; debug!("Fetching state events"); + let state_ids = res.pdu_ids.iter().map(AsRef::as_ref); let state_vec = self - .fetch_and_handle_outliers(origin, &res.pdu_ids, create_event, room_id) + .fetch_and_handle_outliers(origin, state_ids, create_event, room_id) .boxed() .await; let mut state: HashMap = HashMap::with_capacity(state_vec.len()); for (pdu, _) in state_vec { let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; + .state_key() + .ok_or_else(|| err!(Database("Found non-state pdu in state events.")))?; let shortstatekey = self .services .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) + .get_or_create_shortstatekey(&pdu.kind().to_string().into(), state_key) .await; match state.entry(shortstatekey) { | 
hash_map::Entry::Vacant(v) => { - v.insert(pdu.event_id.clone()); + v.insert(pdu.event_id().to_owned()); }, | hash_map::Entry::Occupied(_) => { return Err!(Database( @@ -73,7 +76,7 @@ pub(super) async fn fetch_state( .get_shortstatekey(&StateEventType::RoomCreate, "") .await?; - if state.get(&create_shortstatekey) != Some(&create_event.event_id) { + if state.get(&create_shortstatekey).map(AsRef::as_ref) != Some(create_event.event_id()) { return Err!(Database("Incoming event refers to wrong create event.")); } diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 77cae41d..86a05e0a 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -4,7 +4,7 @@ use std::{ }; use conduwuit::{ - Err, Result, debug, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, + Err, Event, Result, debug::INFO_SPAN_LEVEL, defer, err, implement, utils::stream::IterStream, warn, }; use futures::{ @@ -12,6 +12,7 @@ use futures::{ future::{OptionFuture, try_join5}, }; use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UserId, events::StateEventType}; +use tracing::debug; use crate::rooms::timeline::RawPduId; @@ -121,22 +122,16 @@ pub async fn handle_incoming_pdu<'a>( .timeline .first_pdu_in_room(room_id) .await? - .origin_server_ts; + .origin_server_ts(); - if incoming_pdu.origin_server_ts < first_ts_in_room { + if incoming_pdu.origin_server_ts() < first_ts_in_room { return Ok(None); } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
// These are timeline events let (sorted_prev_events, mut eventid_info) = self - .fetch_prev( - origin, - create_event, - room_id, - first_ts_in_room, - incoming_pdu.prev_events.clone(), - ) + .fetch_prev(origin, create_event, room_id, first_ts_in_room, incoming_pdu.prev_events()) .await?; debug!( diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 5cc6be55..d79eed77 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, HashMap, hash_map}; use conduwuit::{ - Err, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, + Err, Event, PduEvent, Result, debug, debug_info, err, implement, state_res, trace, warn, }; use futures::future::ready; use ruma::{ @@ -12,15 +12,18 @@ use super::{check_room_id, get_room_version_id, to_room_version}; #[implement(super::Service)] #[allow(clippy::too_many_arguments)] -pub(super) async fn handle_outlier_pdu<'a>( +pub(super) async fn handle_outlier_pdu<'a, Pdu>( &self, origin: &'a ServerName, - create_event: &'a PduEvent, + create_event: &'a Pdu, event_id: &'a EventId, room_id: &'a RoomId, mut value: CanonicalJsonObject, auth_events_known: bool, -) -> Result<(PduEvent, BTreeMap)> { +) -> Result<(PduEvent, BTreeMap)> +where + Pdu: Event + Send + Sync, +{ // 1. Remove unsigned field value.remove("unsigned"); @@ -29,7 +32,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // 2. Check signatures, otherwise drop // 3. 
check content hash, redact if doesn't match let room_version_id = get_room_version_id(create_event)?; - let mut val = match self + let mut incoming_pdu = match self .services .server_keys .verify_event(&value, Some(&room_version_id)) @@ -61,13 +64,15 @@ pub(super) async fn handle_outlier_pdu<'a>( // Now that we have checked the signature and hashes we can add the eventID and // convert to our PduEvent type - val.insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + incoming_pdu + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.as_str().to_owned())); + + let pdu_event = serde_json::from_value::( + serde_json::to_value(&incoming_pdu).expect("CanonicalJsonObj is a valid JsonValue"), ) .map_err(|e| err!(Request(BadJson(debug_warn!("Event is not a valid PDU: {e}")))))?; - check_room_id(room_id, &incoming_pdu)?; + check_room_id(room_id, &pdu_event)?; if !auth_events_known { // 4. fetch any missing auth events doing all checks listed here starting at 1. 
@@ -78,7 +83,7 @@ pub(super) async fn handle_outlier_pdu<'a>( debug!("Fetching auth events"); Box::pin(self.fetch_and_handle_outliers( origin, - &incoming_pdu.auth_events, + pdu_event.auth_events(), create_event, room_id, )) @@ -89,8 +94,8 @@ pub(super) async fn handle_outlier_pdu<'a>( // auth events debug!("Checking based on auth events"); // Build map of auth events - let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); - for id in &incoming_pdu.auth_events { + let mut auth_events = HashMap::with_capacity(pdu_event.auth_events().count()); + for id in pdu_event.auth_events() { let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { warn!("Could not find auth event {id}"); continue; @@ -131,7 +136,7 @@ pub(super) async fn handle_outlier_pdu<'a>( let auth_check = state_res::event_auth::auth_check( &to_room_version(&room_version_id), - &incoming_pdu, + &pdu_event, None, // TODO: third party invite state_fetch, ) @@ -147,9 +152,9 @@ pub(super) async fn handle_outlier_pdu<'a>( // 7. Persist the event as an outlier. 
self.services .outlier - .add_pdu_outlier(&incoming_pdu.event_id, &val); + .add_pdu_outlier(pdu_event.event_id(), &incoming_pdu); trace!("Added pdu as outlier."); - Ok((incoming_pdu, val)) + Ok((pdu_event, incoming_pdu)) } diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index d612b2bf..cd46310a 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -1,10 +1,11 @@ use std::{collections::BTreeMap, time::Instant}; use conduwuit::{ - Err, PduEvent, Result, debug, debug::INFO_SPAN_LEVEL, defer, implement, + Err, Event, PduEvent, Result, debug::INFO_SPAN_LEVEL, defer, implement, utils::continue_exponential_backoff_secs, }; -use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; +use ruma::{CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName}; +use tracing::debug; #[implement(super::Service)] #[allow(clippy::type_complexity)] @@ -15,16 +16,19 @@ use ruma::{CanonicalJsonValue, EventId, RoomId, ServerName, UInt}; skip_all, fields(%prev_id), )] -pub(super) async fn handle_prev_pdu<'a>( +pub(super) async fn handle_prev_pdu<'a, Pdu>( &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, eventid_info: Option<(PduEvent, BTreeMap)>, - create_event: &'a PduEvent, - first_ts_in_room: UInt, + create_event: &'a Pdu, + first_ts_in_room: MilliSecondsSinceUnixEpoch, prev_id: &'a EventId, -) -> Result { +) -> Result +where + Pdu: Event + Send + Sync, +{ // Check for disabled again because it might have changed if self.services.metadata.is_disabled(room_id).await { return Err!(Request(Forbidden(debug_warn!( @@ -59,7 +63,7 @@ pub(super) async fn handle_prev_pdu<'a>( }; // Skip old events - if pdu.origin_server_ts < first_ts_in_room { + if pdu.origin_server_ts() < first_ts_in_room { return Ok(()); } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 
45675da8..aed38e1e 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -18,7 +18,7 @@ use std::{ }; use async_trait::async_trait; -use conduwuit::{Err, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; +use conduwuit::{Err, Event, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, events::room::create::RoomCreateEventContent, @@ -104,11 +104,11 @@ impl Service { } } -fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result { - if pdu.room_id != room_id { +fn check_room_id(room_id: &RoomId, pdu: &Pdu) -> Result { + if pdu.room_id() != room_id { return Err!(Request(InvalidParam(error!( - pdu_event_id = ?pdu.event_id, - pdu_room_id = ?pdu.room_id, + pdu_event_id = ?pdu.event_id(), + pdu_room_id = ?pdu.room_id(), ?room_id, "Found event from room in room", )))); @@ -117,7 +117,7 @@ fn check_room_id(room_id: &RoomId, pdu: &PduEvent) -> Result { Ok(()) } -fn get_room_version_id(create_event: &PduEvent) -> Result { +fn get_room_version_id(create_event: &Pdu) -> Result { let content: RoomCreateEventContent = create_event.get_content()?; let room_version = content.room_version; diff --git a/src/service/rooms/event_handler/parse_incoming_pdu.rs b/src/service/rooms/event_handler/parse_incoming_pdu.rs index a49fc541..65cf1752 100644 --- a/src/service/rooms/event_handler/parse_incoming_pdu.rs +++ b/src/service/rooms/event_handler/parse_incoming_pdu.rs @@ -1,4 +1,6 @@ -use conduwuit::{Result, err, implement, pdu::gen_event_id_canonical_json, result::FlatOk}; +use conduwuit::{ + Result, err, implement, matrix::event::gen_event_id_canonical_json, result::FlatOk, +}; use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId}; use serde_json::value::RawValue as RawJsonValue; diff --git a/src/service/rooms/event_handler/state_at_incoming.rs b/src/service/rooms/event_handler/state_at_incoming.rs index eb38c2c3..d3bb8f79 100644 --- 
a/src/service/rooms/event_handler/state_at_incoming.rs +++ b/src/service/rooms/event_handler/state_at_incoming.rs @@ -6,7 +6,7 @@ use std::{ use conduwuit::{ Result, debug, err, implement, - matrix::{PduEvent, StateMap}, + matrix::{Event, StateMap}, trace, utils::stream::{BroadbandExt, IterStream, ReadyExt, TryBroadbandExt, TryWidebandExt}, }; @@ -19,11 +19,18 @@ use crate::rooms::short::ShortStateHash; #[implement(super::Service)] // request and build the state from a known point and resolve if > 1 prev_event #[tracing::instrument(name = "state", level = "debug", skip_all)] -pub(super) async fn state_at_incoming_degree_one( +pub(super) async fn state_at_incoming_degree_one( &self, - incoming_pdu: &PduEvent, -) -> Result>> { - let prev_event = &incoming_pdu.prev_events[0]; + incoming_pdu: &Pdu, +) -> Result>> +where + Pdu: Event + Send + Sync, +{ + let prev_event = incoming_pdu + .prev_events() + .next() + .expect("at least one prev_event"); + let Ok(prev_event_sstatehash) = self .services .state_accessor @@ -55,7 +62,7 @@ pub(super) async fn state_at_incoming_degree_one( .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) .await; - state.insert(shortstatekey, prev_event.clone()); + state.insert(shortstatekey, prev_event.to_owned()); // Now it's the state after the pdu } @@ -66,16 +73,18 @@ pub(super) async fn state_at_incoming_degree_one( #[implement(super::Service)] #[tracing::instrument(name = "state", level = "debug", skip_all)] -pub(super) async fn state_at_incoming_resolved( +pub(super) async fn state_at_incoming_resolved( &self, - incoming_pdu: &PduEvent, + incoming_pdu: &Pdu, room_id: &RoomId, room_version_id: &RoomVersionId, -) -> Result>> { +) -> Result>> +where + Pdu: Event + Send + Sync, +{ trace!("Calculating extremity statehashes..."); let Ok(extremity_sstatehashes) = incoming_pdu - .prev_events - .iter() + .prev_events() .try_stream() .broad_and_then(|prev_eventid| { self.services @@ -133,12 +142,15 @@ pub(super) async fn 
state_at_incoming_resolved( } #[implement(super::Service)] -async fn state_at_incoming_fork( +async fn state_at_incoming_fork( &self, room_id: &RoomId, sstatehash: ShortStateHash, - prev_event: PduEvent, -) -> Result<(StateMap, HashSet)> { + prev_event: Pdu, +) -> Result<(StateMap, HashSet)> +where + Pdu: Event, +{ let mut leaf_state: HashMap<_, _> = self .services .state_accessor @@ -146,15 +158,15 @@ async fn state_at_incoming_fork( .collect() .await; - if let Some(state_key) = &prev_event.state_key { + if let Some(state_key) = prev_event.state_key() { let shortstatekey = self .services .short - .get_or_create_shortstatekey(&prev_event.kind.to_string().into(), state_key) + .get_or_create_shortstatekey(&prev_event.kind().to_string().into(), state_key) .await; - let event_id = &prev_event.event_id; - leaf_state.insert(shortstatekey, event_id.clone()); + let event_id = prev_event.event_id(); + leaf_state.insert(shortstatekey, event_id.to_owned()); // Now it's the state after the pdu } diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 00b18c06..4093cb05 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,7 +1,7 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; use conduwuit::{ - Err, Result, debug, debug_info, err, implement, + Err, Result, debug, debug_info, err, implement, is_equal_to, matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, @@ -17,19 +17,22 @@ use crate::rooms::{ }; #[implement(super::Service)] -pub(super) async fn upgrade_outlier_to_timeline_pdu( +pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: PduEvent, val: BTreeMap, - create_event: &PduEvent, + create_event: &Pdu, origin: &ServerName, room_id: &RoomId, -) -> Result> { +) -> Result> +where + Pdu: Event + Send + 
Sync, +{ // Skip the PDU if we already have it as a timeline event if let Ok(pduid) = self .services .timeline - .get_pdu_id(&incoming_pdu.event_id) + .get_pdu_id(incoming_pdu.event_id()) .await { return Ok(Some(pduid)); @@ -38,7 +41,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( if self .services .pdu_metadata - .is_event_soft_failed(&incoming_pdu.event_id) + .is_event_soft_failed(incoming_pdu.event_id()) .await { return Err!(Request(InvalidParam("Event has been soft failed"))); @@ -53,7 +56,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // These are not timeline events. debug!("Resolving state at event"); - let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { + let mut state_at_incoming_event = if incoming_pdu.prev_events().count() == 1 { self.state_at_incoming_degree_one(&incoming_pdu).await? } else { self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) @@ -62,12 +65,13 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( if state_at_incoming_event.is_none() { state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, &incoming_pdu.event_id) + .fetch_state(origin, create_event, room_id, incoming_pdu.event_id()) .await?; } let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + let room_version = to_room_version(&room_version_id); debug!("Performing auth check"); @@ -99,10 +103,10 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .state .get_auth_events( room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, + incoming_pdu.kind(), + incoming_pdu.sender(), + incoming_pdu.state_key(), + incoming_pdu.content(), ) .await?; @@ -129,7 +133,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( !self .services .state_accessor - .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) + .user_can_redact(&redact_id, incoming_pdu.sender(), 
incoming_pdu.room_id(), true) .await?, }; @@ -149,7 +153,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(ToOwned::to_owned) .ready_filter(|event_id| { // Remove any that are referenced by this incoming event's prev_events - !incoming_pdu.prev_events.contains(event_id) + !incoming_pdu.prev_events().any(is_equal_to!(event_id)) }) .broad_filter_map(|event_id| async move { // Only keep those extremities were not referenced yet @@ -166,7 +170,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( debug!( "Retained {} extremities checked against {} prev_events", extremities.len(), - incoming_pdu.prev_events.len() + incoming_pdu.prev_events().count() ); let state_ids_compressed: Arc = self @@ -181,20 +185,20 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map(Arc::new) .await; - if incoming_pdu.state_key.is_some() { + if incoming_pdu.state_key().is_some() { debug!("Event is a state-event. Deriving new room state"); // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { + if let Some(state_key) = incoming_pdu.state_key() { let shortstatekey = self .services .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) + .get_or_create_shortstatekey(&incoming_pdu.kind().to_string().into(), state_key) .await; - let event_id = &incoming_pdu.event_id; - state_after.insert(shortstatekey, event_id.clone()); + let event_id = incoming_pdu.event_id(); + state_after.insert(shortstatekey, event_id.to_owned()); } let new_room_state = self @@ -236,9 +240,9 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // Soft fail, we keep the event as an outlier but don't add it to the timeline self.services .pdu_metadata - .mark_event_soft_failed(&incoming_pdu.event_id); + .mark_event_soft_failed(incoming_pdu.event_id()); - warn!("Event was soft failed: {incoming_pdu:?}"); + warn!("Event was soft failed: {:?}", 
incoming_pdu.event_id()); return Err!(Request(InvalidParam("Event has been soft failed"))); } @@ -249,7 +253,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( let extremities = extremities .iter() .map(Borrow::borrow) - .chain(once(incoming_pdu.event_id.borrow())); + .chain(once(incoming_pdu.event_id())); let pdu_id = self .services diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 12b56935..6ab2c026 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use conduwuit::{Result, implement, matrix::pdu::PduEvent}; -use conduwuit_database::{Deserialized, Json, Map}; +use conduwuit::{Result, implement, matrix::PduEvent}; +use database::{Deserialized, Json, Map}; use ruma::{CanonicalJsonObject, EventId}; pub struct Service { diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index f0beab5a..c1376cb0 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,8 +1,8 @@ use std::{mem::size_of, sync::Arc}; use conduwuit::{ - PduCount, PduEvent, arrayvec::ArrayVec, + matrix::{Event, PduCount}, result::LogErr, utils::{ ReadyExt, @@ -33,8 +33,6 @@ struct Services { timeline: Dep, } -pub(super) type PdusIterItem = (PduCount, PduEvent); - impl Data { pub(super) fn new(args: &crate::Args<'_>) -> Self { let db = &args.db; @@ -62,7 +60,7 @@ impl Data { target: ShortEventId, from: PduCount, dir: Direction, - ) -> impl Stream + Send + '_ { + ) -> impl Stream + Send + '_ { let mut current = ArrayVec::::new(); current.extend(target.to_be_bytes()); current.extend(from.saturating_inc(dir).into_unsigned().to_be_bytes()); @@ -80,8 +78,8 @@ impl Data { let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - if pdu.sender != user_id { - pdu.remove_transaction_id().log_err().ok(); + if pdu.sender() != user_id { + pdu.as_mut_pdu().remove_transaction_id().log_err().ok(); } 
Some((shorteventid, pdu)) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 18221c2d..c8e863fa 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,11 +1,14 @@ mod data; use std::sync::Arc; -use conduwuit::{PduCount, Result}; +use conduwuit::{ + Result, + matrix::{Event, PduCount}, +}; use futures::{StreamExt, future::try_join}; use ruma::{EventId, RoomId, UserId, api::Direction}; -use self::data::{Data, PdusIterItem}; +use self::data::Data; use crate::{Dep, rooms}; pub struct Service { @@ -44,16 +47,16 @@ impl Service { } #[allow(clippy::too_many_arguments)] - pub async fn get_relations( - &self, - user_id: &UserId, - room_id: &RoomId, - target: &EventId, + pub async fn get_relations<'a>( + &'a self, + user_id: &'a UserId, + room_id: &'a RoomId, + target: &'a EventId, from: PduCount, limit: usize, max_depth: u8, dir: Direction, - ) -> Vec { + ) -> Vec<(PduCount, impl Event)> { let room_id = self.services.short.get_shortroomid(room_id); let target = self.services.timeline.get_pdu_count(target); diff --git a/src/service/rooms/read_receipt/mod.rs b/src/service/rooms/read_receipt/mod.rs index 69e859c4..68ce9b7f 100644 --- a/src/service/rooms/read_receipt/mod.rs +++ b/src/service/rooms/read_receipt/mod.rs @@ -4,7 +4,10 @@ use std::{collections::BTreeMap, sync::Arc}; use conduwuit::{ Result, debug, err, - matrix::pdu::{PduCount, PduId, RawPduId}, + matrix::{ + Event, + pdu::{PduCount, PduId, RawPduId}, + }, warn, }; use futures::{Stream, TryFutureExt, try_join}; @@ -74,14 +77,13 @@ impl Service { let shortroomid = self.services.short.get_shortroomid(room_id).map_err(|e| { err!(Database(warn!("Short room ID does not exist in database for {room_id}: {e}"))) }); - let (pdu_count, shortroomid) = try_join!(pdu_count, shortroomid)?; + let (pdu_count, shortroomid) = try_join!(pdu_count, shortroomid)?; let shorteventid = PduCount::Normal(pdu_count); let pdu_id: RawPduId = PduId { 
shortroomid, shorteventid }.into(); - let pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await?; - let event_id: OwnedEventId = pdu.event_id; + let event_id: OwnedEventId = pdu.event_id().to_owned(); let user_id: OwnedUserId = user_id.to_owned(); let content: BTreeMap = BTreeMap::from_iter([( event_id, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index b9d067a6..afe3061b 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,9 +1,10 @@ use std::sync::Arc; -use conduwuit_core::{ - Event, PduCount, PduEvent, Result, +use conduwuit::{ + PduCount, Result, arrayvec::ArrayVec, implement, + matrix::event::{Event, Matches}, utils::{ ArrayVecExt, IterStream, ReadyExt, set, stream::{TryIgnore, WidebandExt}, @@ -103,9 +104,10 @@ pub fn deindex_pdu(&self, shortroomid: ShortRoomId, pdu_id: &RawPduId, message_b pub async fn search_pdus<'a>( &'a self, query: &'a RoomQuery<'a>, -) -> Result<(usize, impl Stream + Send + 'a)> { +) -> Result<(usize, impl Stream> + Send + '_)> { let pdu_ids: Vec<_> = self.search_pdu_ids(query).await?.collect().await; + let filter = &query.criteria.filter; let count = pdu_ids.len(); let pdus = pdu_ids .into_iter() @@ -118,11 +120,11 @@ pub async fn search_pdus<'a>( .ok() }) .ready_filter(|pdu| !pdu.is_redacted()) - .ready_filter(|pdu| pdu.matches(&query.criteria.filter)) + .ready_filter(move |pdu| filter.matches(pdu)) .wide_filter_map(move |pdu| async move { self.services .state_accessor - .user_can_see_event(query.user_id?, &pdu.room_id, &pdu.event_id) + .user_can_see_event(query.user_id?, pdu.room_id(), pdu.event_id()) .await .then_some(pdu) }) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 9eb02221..641aa6a9 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -356,8 +356,8 @@ impl Service { &self, room_id: &RoomId, shortstatehash: u64, - _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get 
the room - * state mutex */ + // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &RoomMutexGuard, ) { const BUFSIZE: usize = size_of::(); diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs index 89fa2a83..89a66f0c 100644 --- a/src/service/rooms/state_accessor/room_state.rs +++ b/src/service/rooms/state_accessor/room_state.rs @@ -2,7 +2,7 @@ use std::borrow::Borrow; use conduwuit::{ Result, err, implement, - matrix::{PduEvent, StateKey}, + matrix::{Event, StateKey}, }; use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{EventId, RoomId, events::StateEventType}; @@ -30,7 +30,7 @@ where pub fn room_state_full<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) @@ -45,7 +45,7 @@ pub fn room_state_full<'a>( pub fn room_state_full_pdus<'a>( &'a self, room_id: &'a RoomId, -) -> impl Stream> + Send + 'a { +) -> impl Stream> + Send + 'a { self.services .state .get_room_shortstatehash(room_id) @@ -84,7 +84,7 @@ pub async fn room_state_get( room_id: &RoomId, event_type: &StateEventType, state_key: &str, -) -> Result { +) -> Result { self.services .state .get_room_shortstatehash(room_id) diff --git a/src/service/rooms/state_accessor/state.rs b/src/service/rooms/state_accessor/state.rs index 169e69e9..a46ce380 100644 --- a/src/service/rooms/state_accessor/state.rs +++ b/src/service/rooms/state_accessor/state.rs @@ -2,14 +2,14 @@ use std::{borrow::Borrow, ops::Deref, sync::Arc}; use conduwuit::{ Result, at, err, implement, - matrix::{PduEvent, StateKey}, + matrix::{Event, StateKey}, pair_of, utils::{ result::FlatOk, stream::{BroadbandExt, IterStream, ReadyExt, TryIgnore}, }, }; -use conduwuit_database::Deserialized; +use database::Deserialized; use futures::{FutureExt, Stream, StreamExt, TryFutureExt, future::try_join, pin_mut}; use ruma::{ EventId, OwnedEventId, UserId, @@ -125,11 
+125,9 @@ pub async fn state_get( shortstatehash: ShortStateHash, event_type: &StateEventType, state_key: &str, -) -> Result { +) -> Result { self.state_get_id(shortstatehash, event_type, state_key) - .and_then(|event_id: OwnedEventId| async move { - self.services.timeline.get_pdu(&event_id).await - }) + .and_then(async |event_id: OwnedEventId| self.services.timeline.get_pdu(&event_id).await) .await } @@ -316,18 +314,16 @@ pub fn state_added( pub fn state_full( &self, shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { +) -> impl Stream + Send + '_ { self.state_full_pdus(shortstatehash) - .ready_filter_map(|pdu| { - Some(((pdu.kind.to_string().into(), pdu.state_key.clone()?), pdu)) - }) + .ready_filter_map(|pdu| Some(((pdu.kind().clone().into(), pdu.state_key()?.into()), pdu))) } #[implement(super::Service)] pub fn state_full_pdus( &self, shortstatehash: ShortStateHash, -) -> impl Stream + Send + '_ { +) -> impl Stream + Send + '_ { let short_ids = self .state_full_shortids(shortstatehash) .ignore_err() diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs index 67e0b52b..221263a8 100644 --- a/src/service/rooms/state_accessor/user_can.rs +++ b/src/service/rooms/state_accessor/user_can.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Result, implement, pdu::PduBuilder}; +use conduwuit::{Err, Result, implement, matrix::Event, pdu::PduBuilder}; use ruma::{ EventId, RoomId, UserId, events::{ @@ -29,14 +29,14 @@ pub async fn user_can_redact( if redacting_event .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomCreate) + .is_ok_and(|pdu| *pdu.kind() == TimelineEventType::RoomCreate) { return Err!(Request(Forbidden("Redacting m.room.create is not safe, forbidding."))); } if redacting_event .as_ref() - .is_ok_and(|pdu| pdu.kind == TimelineEventType::RoomServerAcl) + .is_ok_and(|pdu| *pdu.kind() == TimelineEventType::RoomServerAcl) { return Err!(Request(Forbidden( "Redacting m.room.server_acl will result in 
the room being inaccessible for \ @@ -59,9 +59,9 @@ pub async fn user_can_redact( && match redacting_event { | Ok(redacting_event) => if federation { - redacting_event.sender.server_name() == sender.server_name() + redacting_event.sender().server_name() == sender.server_name() } else { - redacting_event.sender == sender + redacting_event.sender() == sender }, | _ => false, }) @@ -72,10 +72,10 @@ pub async fn user_can_redact( .room_state_get(room_id, &StateEventType::RoomCreate, "") .await { - | Ok(room_create) => Ok(room_create.sender == sender + | Ok(room_create) => Ok(room_create.sender() == sender || redacting_event .as_ref() - .is_ok_and(|redacting_event| redacting_event.sender == sender)), + .is_ok_and(|redacting_event| redacting_event.sender() == sender)), | _ => Err!(Database( "No m.room.power_levels or m.room.create events in database for room" )), diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index 9566eb61..59319ba6 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -49,10 +49,9 @@ impl crate::Service for Service { } impl Service { - pub async fn add_to_thread<'a, E>(&self, root_event_id: &EventId, event: &'a E) -> Result + pub async fn add_to_thread(&self, root_event_id: &EventId, event: &E) -> Result where E: Event + Send + Sync, - &'a E: Event + Send, { let root_id = self .services @@ -120,7 +119,7 @@ impl Service { self.services .timeline - .replace_pdu(&root_id, &root_pdu_json, &root_pdu) + .replace_pdu(&root_id, &root_pdu_json) .await?; } @@ -130,7 +129,7 @@ impl Service { users.extend_from_slice(&userids); }, | _ => { - users.push(root_pdu.sender); + users.push(root_pdu.sender().to_owned()); }, } users.push(event.sender().to_owned()); @@ -162,10 +161,10 @@ impl Service { .ready_take_while(move |pdu_id| pdu_id.shortroomid() == shortroomid.to_be_bytes()) .wide_filter_map(move |pdu_id| async move { let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - let 
pdu_id: PduId = pdu_id.into(); - if pdu.sender != user_id { - pdu.remove_transaction_id().ok(); + let pdu_id: PduId = pdu_id.into(); + if pdu.sender() != user_id { + pdu.as_mut_pdu().remove_transaction_id().ok(); } Some((pdu_id.shorteventid, pdu)) diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 94c78bb0..fa10a5c0 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -207,7 +207,6 @@ impl Data { &self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject, - _pdu: &PduEvent, ) -> Result { if self.pduid_pdu.get(pdu_id).await.is_not_found() { return Err!(Request(NotFound("PDU does not exist."))); diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index bcad1309..a381fcf6 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -14,8 +14,8 @@ pub use conduwuit::matrix::pdu::{PduId, RawPduId}; use conduwuit::{ Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, matrix::{ - Event, - pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + event::{Event, gen_event_id}, + pdu::{EventHash, PduBuilder, PduCount, PduEvent}, state_res::{self, RoomVersion}, }, utils::{ @@ -159,12 +159,12 @@ impl crate::Service for Service { impl Service { #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { + pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { self.first_item_in_room(room_id).await.map(at!(1)) } #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, PduEvent)> { + pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, impl Event)> { let pdus = self.pdus(None, room_id, None); pin_mut!(pdus); @@ -174,7 +174,7 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn latest_pdu_in_room(&self, 
room_id: &RoomId) -> Result { + pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { self.db.latest_pdu_in_room(None, room_id).await } @@ -216,13 +216,14 @@ impl Service { /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. #[inline] - pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { + pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { self.db.get_non_outlier_pdu(event_id).await } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + #[inline] pub async fn get_pdu(&self, event_id: &EventId) -> Result { self.db.get_pdu(event_id).await } @@ -230,11 +231,13 @@ impl Service { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. + #[inline] pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_from_id(pdu_id).await } /// Returns the pdu as a `BTreeMap`. + #[inline] pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_json_from_id(pdu_id).await } @@ -242,6 +245,7 @@ impl Service { /// Checks if pdu exists /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + #[inline] pub fn pdu_exists<'a>( &'a self, event_id: &'a EventId, @@ -251,13 +255,8 @@ impl Service { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self), level = "debug")] - pub async fn replace_pdu( - &self, - pdu_id: &RawPduId, - pdu_json: &CanonicalJsonObject, - pdu: &PduEvent, - ) -> Result<()> { - self.db.replace_pdu(pdu_id, pdu_json, pdu).await + pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject) -> Result { + self.db.replace_pdu(pdu_id, pdu_json).await } /// Creates a new persisted data unit and adds it to a room. 
@@ -310,25 +309,21 @@ impl Service { unsigned.insert( "prev_content".to_owned(), CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()).map_err( - |e| { - error!( - "Failed to convert prev_state to canonical JSON: {e}" - ); - Error::bad_database( - "Failed to convert prev_state to canonical JSON.", - ) - }, - )?, + utils::to_canonical_object(prev_state.get_content_as_value()) + .map_err(|e| { + err!(Database(error!( + "Failed to convert prev_state to canonical JSON: {e}", + ))) + })?, ), ); unsigned.insert( String::from("prev_sender"), - CanonicalJsonValue::String(prev_state.sender.to_string()), + CanonicalJsonValue::String(prev_state.sender().to_string()), ); unsigned.insert( String::from("replaces_state"), - CanonicalJsonValue::String(prev_state.event_id.to_string()), + CanonicalJsonValue::String(prev_state.event_id().to_string()), ); } } @@ -709,14 +704,11 @@ impl Service { .await { unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender) - .expect("UserId::to_value always works"), - ); + unsigned + .insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?); unsigned.insert( "replaces_state".to_owned(), - serde_json::to_value(&prev_pdu.event_id).expect("EventId is valid json"), + serde_json::to_value(prev_pdu.event_id())?, ); } } @@ -759,7 +751,7 @@ impl Service { unsigned: if unsigned.is_empty() { None } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + Some(to_raw_value(&unsigned)?) }, hashes: EventHash { sha256: "aaa".to_owned() }, signatures: None, @@ -1041,10 +1033,10 @@ impl Service { /// Replace a PDU with the redacted form. 
#[tracing::instrument(name = "redact", level = "debug", skip(self))] - pub async fn redact_pdu( + pub async fn redact_pdu( &self, event_id: &EventId, - reason: &PduEvent, + reason: &Pdu, shortroomid: ShortRoomId, ) -> Result { // TODO: Don't reserialize, keep original json @@ -1053,9 +1045,13 @@ impl Service { return Ok(()); }; - let mut pdu = self.get_pdu_from_id(&pdu_id).await.map_err(|e| { - err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) - })?; + let mut pdu = self + .get_pdu_from_id(&pdu_id) + .await + .map(Event::into_pdu) + .map_err(|e| { + err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) + })?; if let Ok(content) = pdu.get_content::() { if let Some(body) = content.body { @@ -1065,15 +1061,15 @@ impl Service { } } - let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; + let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; - pdu.redact(&room_version_id, reason)?; + pdu.redact(&room_version_id, reason.to_value())?; let obj = utils::to_canonical_object(&pdu).map_err(|e| { err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))) })?; - self.replace_pdu(&pdu_id, &obj, &pdu).await + self.replace_pdu(&pdu_id, &obj).await } #[tracing::instrument(name = "backfill", level = "debug", skip(self))] @@ -1163,7 +1159,7 @@ impl Service { backfill_server, federation::backfill::get_backfill::v1::Request { room_id: room_id.to_owned(), - v: vec![first_pdu.1.event_id.clone()], + v: vec![first_pdu.1.event_id().to_owned()], limit: uint!(100), }, ) @@ -1248,8 +1244,11 @@ impl Service { #[implement(Service)] #[tracing::instrument(skip_all, level = "debug")] -async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { - match &pdu.kind { +async fn check_pdu_for_admin_room(&self, pdu: &Pdu, sender: &UserId) -> Result +where + Pdu: Event + Send + Sync, +{ + match pdu.kind() { | TimelineEventType::RoomEncryption => 
{ return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); }, @@ -1273,7 +1272,7 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res let count = self .services .state_cache - .room_members(&pdu.room_id) + .room_members(pdu.room_id()) .ready_filter(|user| self.services.globals.user_is_local(user)) .ready_filter(|user| *user != target) .boxed() @@ -1297,7 +1296,7 @@ async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Res let count = self .services .state_cache - .room_members(&pdu.room_id) + .room_members(pdu.room_id()) .ready_filter(|user| self.services.globals.user_is_local(user)) .ready_filter(|user| *user != target) .boxed() diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index 408ab17d..a708f746 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -798,7 +798,7 @@ impl Service { let unread: UInt = self .services .user - .notification_count(&user_id, &pdu.room_id) + .notification_count(&user_id, pdu.room_id()) .await .try_into() .expect("notification count can't go that high"); diff --git a/src/service/server_keys/verify.rs b/src/service/server_keys/verify.rs index 84433628..9cc3655a 100644 --- a/src/service/server_keys/verify.rs +++ b/src/service/server_keys/verify.rs @@ -1,4 +1,4 @@ -use conduwuit::{Err, Result, implement, pdu::gen_event_id_canonical_json}; +use conduwuit::{Err, Result, implement, matrix::event::gen_event_id_canonical_json}; use ruma::{ CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomVersionId, signatures::Verified, }; From c06aa49a903a18c87e709a72128662a707cc79ec Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 13 May 2025 21:33:07 +0000 Subject: [PATCH 080/270] Fix regression 75aadd5c6a Signed-off-by: Jason Volk --- src/api/client/user_directory.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/api/client/user_directory.rs 
b/src/api/client/user_directory.rs index 748fc049..9a1f86b8 100644 --- a/src/api/client/user_directory.rs +++ b/src/api/client/user_directory.rs @@ -1,10 +1,7 @@ use axum::extract::State; use conduwuit::{ Result, - utils::{ - future::BoolExt, - stream::{BroadbandExt, ReadyExt}, - }, + utils::{future::BoolExt, stream::BroadbandExt}, }; use futures::{FutureExt, StreamExt, pin_mut}; use ruma::{ @@ -37,17 +34,18 @@ pub(crate) async fn search_users_route( let mut users = services .users .stream() - .ready_filter(|user_id| user_id.as_str().to_lowercase().contains(&search_term)) .map(ToOwned::to_owned) .broad_filter_map(async |user_id| { let display_name = services.users.displayname(&user_id).await.ok(); + let user_id_matches = user_id.as_str().to_lowercase().contains(&search_term); + let display_name_matches = display_name .as_deref() .map(str::to_lowercase) .is_some_and(|display_name| display_name.contains(&search_term)); - if !display_name_matches { + if !user_id_matches && !display_name_matches { return None; } From c5c309ec4301a3d38bda4a8978a499ffa245f834 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Apr 2025 06:39:30 +0000 Subject: [PATCH 081/270] Split timeline service. 
Signed-off-by: Jason Volk --- src/api/client/room/initial_sync.rs | 2 +- src/service/rooms/timeline/append.rs | 446 ++++++++++ src/service/rooms/timeline/backfill.rs | 191 +++++ src/service/rooms/timeline/build.rs | 226 +++++ src/service/rooms/timeline/create.rs | 214 +++++ src/service/rooms/timeline/mod.rs | 1084 +----------------------- src/service/rooms/timeline/redact.rs | 51 ++ 7 files changed, 1146 insertions(+), 1068 deletions(-) create mode 100644 src/service/rooms/timeline/append.rs create mode 100644 src/service/rooms/timeline/backfill.rs create mode 100644 src/service/rooms/timeline/build.rs create mode 100644 src/service/rooms/timeline/create.rs create mode 100644 src/service/rooms/timeline/redact.rs diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 2aca5b9d..d40f6b4f 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,7 +1,7 @@ use axum::extract::State; use conduwuit::{ Err, Event, Result, at, - utils::{BoolExt, future::TryExtExt, stream::TryTools}, + utils::{BoolExt, stream::TryTools}, }; use futures::{FutureExt, TryStreamExt, future::try_join4}; use ruma::api::client::room::initial_sync::v3::{PaginationChunk, Request, Response}; diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs new file mode 100644 index 00000000..a7b558c2 --- /dev/null +++ b/src/service/rooms/timeline/append.rs @@ -0,0 +1,446 @@ +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; + +use conduwuit_core::{ + Result, err, error, implement, + matrix::{ + event::Event, + pdu::{PduCount, PduEvent, PduId, RawPduId}, + }, + utils::{self, ReadyExt}, +}; +use futures::StreamExt; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, EventId, RoomVersionId, UserId, + events::{ + GlobalAccountDataEventType, StateEventType, TimelineEventType, + push_rules::PushRulesEvent, + room::{ + encrypted::Relation, + member::{MembershipState, RoomMemberEventContent}, 
+ power_levels::RoomPowerLevelsEventContent, + redaction::RoomRedactionEventContent, + }, + }, + push::{Action, Ruleset, Tweak}, +}; + +use super::{ExtractBody, ExtractRelatesTo, ExtractRelatesToEventId, RoomMutexGuard}; +use crate::{appservice::NamespaceRegex, rooms::state_compressor::CompressedState}; + +/// Append the incoming event setting the state snapshot to the state from +/// the server that sent the event. +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip_all)] +pub async fn append_incoming_pdu<'a, Leaves>( + &'a self, + pdu: &'a PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: Leaves, + state_ids_compressed: Arc, + soft_fail: bool, + state_lock: &'a RoomMutexGuard, +) -> Result> +where + Leaves: Iterator + Send + 'a, +{ + // We append to state before appending the pdu, so we don't have a moment in + time with the pdu without its state. This is okay because append_pdu can't + fail. + self.services + .state + .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) + .await?; + + if soft_fail { + self.services + .pdu_metadata + .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); + + self.services + .state + .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .await; + + return Ok(None); + } + + let pdu_id = self + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .await?; + + Ok(Some(pdu_id)) +} + +/// Creates a new persisted data unit and adds it to a room. +/// +/// By this point the incoming event should be fully authenticated, no auth +/// happens in `append_pdu`. +/// +/// Returns pdu id +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip_all)] +pub async fn append_pdu<'a, Leaves>( + &'a self, + pdu: &'a PduEvent, + mut pdu_json: CanonicalJsonObject, + leaves: Leaves, + state_lock: &'a RoomMutexGuard, +) -> Result +where + Leaves: Iterator + Send + 'a, +{ + // Coalesce database writes for the remainder of this scope.
+ let _cork = self.db.db.cork_and_flush(); + + let shortroomid = self + .services + .short + .get_shortroomid(pdu.room_id()) + .await + .map_err(|_| err!(Database("Room does not exist")))?; + + // Make unsigned fields correct. This is not properly documented in the spec, + // but state events need to have previous content in the unsigned field, so + // clients can easily interpret things like membership changes + if let Some(state_key) = pdu.state_key() { + if let CanonicalJsonValue::Object(unsigned) = pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) + { + if let Ok(shortstatehash) = self + .services + .state_accessor + .pdu_shortstatehash(pdu.event_id()) + .await + { + if let Ok(prev_state) = self + .services + .state_accessor + .state_get(shortstatehash, &pdu.kind().to_string().into(), state_key) + .await + { + unsigned.insert( + "prev_content".to_owned(), + CanonicalJsonValue::Object( + utils::to_canonical_object(prev_state.get_content_as_value()) + .map_err(|e| { + err!(Database(error!( + "Failed to convert prev_state to canonical JSON: {e}", + ))) + })?, + ), + ); + unsigned.insert( + String::from("prev_sender"), + CanonicalJsonValue::String(prev_state.sender().to_string()), + ); + unsigned.insert( + String::from("replaces_state"), + CanonicalJsonValue::String(prev_state.event_id().to_string()), + ); + } + } + } else { + error!("Invalid unsigned type in pdu."); + } + } + + // We must keep track of all events that have been referenced. 
+ self.services + .pdu_metadata + .mark_as_referenced(pdu.room_id(), pdu.prev_events().map(AsRef::as_ref)); + + self.services + .state + .set_forward_extremities(pdu.room_id(), leaves, state_lock) + .await; + + let insert_lock = self.mutex_insert.lock(pdu.room_id()).await; + + let count1 = self.services.globals.next_count().unwrap(); + + // Mark as read first so the sending client doesn't get a notification even if + // appending fails + self.services + .read_receipt + .private_read_set(pdu.room_id(), pdu.sender(), count1); + + self.services + .user + .reset_notification_counts(pdu.sender(), pdu.room_id()); + + let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); + + // Insert pdu + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; + + drop(insert_lock); + + // See if the event matches any known pushers via power level + let power_levels: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(pdu.room_id(), &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let mut push_target: HashSet<_> = self + .services + .state_cache + .active_local_users_in_room(pdu.room_id()) + .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and don't send from ignored users + .ready_filter(|user| *user != pdu.sender()) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(pdu.sender(), &recipient_user).await).then_some(recipient_user) }) + .collect() + .await; + + let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); + + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key) = pdu.state_key() { + let target_user_id = UserId::parse(state_key)?; + + if self.services.users.is_active_local(target_user_id).await {
push_target.insert(target_user_id.to_owned()); + } + } + } + + let serialized = pdu.to_format(); + for user in &push_target { + let rules_for_user = self + .services + .account_data + .get_global(user, GlobalAccountDataEventType::PushRules) + .await + .map_or_else( + |_| Ruleset::server_default(user), + |ev: PushRulesEvent| ev.content.global, + ); + + let mut highlight = false; + let mut notify = false; + + for action in self + .services + .pusher + .get_actions(user, &rules_for_user, &power_levels, &serialized, pdu.room_id()) + .await + { + match action { + | Action::Notify => notify = true, + | Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + }, + | _ => {}, + } + + // Break early if both conditions are true + if notify && highlight { + break; + } + } + + if notify { + notifies.push(user.clone()); + } + + if highlight { + highlights.push(user.clone()); + } + + self.services + .pusher + .get_pushkeys(user) + .ready_for_each(|push_key| { + self.services + .sending + .send_pdu_push(&pdu_id, user, push_key.to_owned()) + .expect("TODO: replace with future"); + }) + .await; + } + + self.db + .increment_notification_counts(pdu.room_id(), notifies, highlights); + + match *pdu.kind() { + | TimelineEventType::RoomRedaction => { + use RoomVersionId::*; + + let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; + match room_version_id { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = pdu.redacts() { + if self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? + { + self.redact_pdu(redact_id, pdu, shortroomid).await?; + } + } + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? 
+ { + self.redact_pdu(redact_id, pdu, shortroomid).await?; + } + } + }, + } + }, + | TimelineEventType::SpaceChild => + if let Some(_state_key) = pdu.state_key() { + self.services + .spaces + .roomid_spacehierarchy_cache + .lock() + .await + .remove(pdu.room_id()); + }, + | TimelineEventType::RoomMember => { + if let Some(state_key) = pdu.state_key() { + // if the state_key fails + let target_user_id = + UserId::parse(state_key).expect("This state_key was previously validated"); + + let content: RoomMemberEventContent = pdu.get_content()?; + let stripped_state = match content.membership { + | MembershipState::Invite | MembershipState::Knock => + self.services.state.summary_stripped(pdu).await.into(), + | _ => None, + }; + + // Update our membership info, we do this here in case a user is invited or + // knocked and immediately leaves we need the DB to record the invite or + // knock event for auth + self.services + .state_cache + .update_membership( + pdu.room_id(), + target_user_id, + content, + pdu.sender(), + stripped_state, + None, + true, + ) + .await?; + } + }, + | TimelineEventType::RoomMessage => { + let content: ExtractBody = pdu.get_content()?; + if let Some(body) = content.body { + self.services.search.index_pdu(shortroomid, &pdu_id, &body); + + if self.services.admin.is_admin_command(pdu, &body).await { + self.services + .admin + .command_with_sender(body, Some((pdu.event_id()).into()), pdu.sender.clone().into())?; + } + } + }, + | _ => {}, + } + + if let Ok(content) = pdu.get_content::() { + if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { + self.services + .pdu_metadata + .add_relation(count2, related_pducount); + } + } + + if let Ok(content) = pdu.get_content::() { + match content.relates_to { + | Relation::Reply { in_reply_to } => { + // We need to do it again here, because replies don't have + // event_id as a top level field + if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await {
self.services + .pdu_metadata + .add_relation(count2, related_pducount); + } + }, + | Relation::Thread(thread) => { + self.services + .threads + .add_to_thread(&thread.event_id, pdu) + .await?; + }, + | _ => {}, // TODO: Aggregate other types + } + } + + for appservice in self.services.appservice.read().await.values() { + if self + .services + .state_cache + .appservice_in_room(pdu.room_id(), appservice) + .await + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + continue; + } + + // If the RoomMember event has a non-empty state_key, it is targeted at someone. + // If it is our appservice user, we send this PDU to it. + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + let appservice_uid = appservice.registration.sender_localpart.as_str(); + if state_key_uid == &appservice_uid { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + continue; + } + } + } + + let matching_users = |users: &NamespaceRegex| { + appservice.users.is_match(pdu.sender().as_str()) + || *pdu.kind() == TimelineEventType::RoomMember + && pdu + .state_key + .as_ref() + .is_some_and(|state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: NamespaceRegex| { + self.services + .alias + .local_aliases_for_room(pdu.room_id()) + .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) + }; + + if matching_aliases(appservice.aliases.clone()).await + || appservice.rooms.is_match(pdu.room_id().as_str()) + || matching_users(&appservice.users) + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + } + } + + Ok(pdu_id) +} diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs new file mode 100644 index 00000000..e976981e --- /dev/null +++ 
b/src/service/rooms/timeline/backfill.rs @@ -0,0 +1,191 @@ +use std::iter::once; + +use conduwuit_core::{ + Result, debug, debug_warn, implement, info, + matrix::{ + event::Event, + pdu::{PduCount, PduId, RawPduId}, + }, + utils::{IterStream, ReadyExt}, + validated, warn, +}; +use futures::{FutureExt, StreamExt}; +use ruma::{ + RoomId, ServerName, + api::federation, + events::{ + StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent, + }, + uint, +}; +use serde_json::value::RawValue as RawJsonValue; + +use super::ExtractBody; + +#[implement(super::Service)] +#[tracing::instrument(name = "backfill", level = "debug", skip(self))] +pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { + if self + .services + .state_cache + .room_joined_count(room_id) + .await + .is_ok_and(|count| count <= 1) + && !self + .services + .state_accessor + .is_world_readable(room_id) + .await + { + // Room is empty (1 user or none), there is no one that can backfill + return Ok(()); + } + + let first_pdu = self + .first_item_in_room(room_id) + .await + .expect("Room is not empty"); + + if first_pdu.0 < from { + // No backfill required, there are still events between them + return Ok(()); + } + + let power_levels: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { + if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) { + Some(user_id.server_name()) + } else { + None + } + }); + + let canonical_room_alias_server = once( + self.services + .state_accessor + .get_canonical_alias(room_id) + .await, + ) + .filter_map(Result::ok) + .map(|alias| alias.server_name().to_owned()) + .stream(); + + let mut servers = room_mods + .stream() + .map(ToOwned::to_owned) + .chain(canonical_room_alias_server) + .chain( 
+ self.services + .server + .config + .trusted_servers + .iter() + .map(ToOwned::to_owned) + .stream(), + ) + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)) + .filter_map(|server_name| async move { + self.services + .state_cache + .server_in_room(&server_name, room_id) + .await + .then_some(server_name) + }) + .boxed(); + + while let Some(ref backfill_server) = servers.next().await { + info!("Asking {backfill_server} for backfill"); + let response = self + .services + .sending + .send_federation_request( + backfill_server, + federation::backfill::get_backfill::v1::Request { + room_id: room_id.to_owned(), + v: vec![first_pdu.1.event_id().to_owned()], + limit: uint!(100), + }, + ) + .await; + match response { + | Ok(response) => { + for pdu in response.pdus { + if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { + debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); + } + } + return Ok(()); + }, + | Err(e) => { + warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}"); + }, + } + } + + info!("No servers could backfill, but backfill was needed in room {room_id}"); + Ok(()) +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self, pdu), level = "debug")] +pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { + let (room_id, event_id, value) = self.services.event_handler.parse_incoming_pdu(&pdu).await?; + + // Lock so we cannot backfill the same pdu twice at the same time + let mutex_lock = self + .services + .event_handler + .mutex_federation + .lock(&room_id) + .await; + + // Skip the PDU if we already have it as a timeline event + if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { + debug!("We already know {event_id} at {pdu_id:?}"); + return Ok(()); + } + + self.services + .event_handler + .handle_incoming_pdu(origin, &room_id, &event_id, value, false) + .boxed() + .await?; + + let value = self.get_pdu_json(&event_id).await?; + + let pdu 
= self.get_pdu(&event_id).await?; + + let shortroomid = self.services.short.get_shortroomid(&room_id).await?; + + let insert_lock = self.mutex_insert.lock(&room_id).await; + + let count: i64 = self.services.globals.next_count().unwrap().try_into()?; + + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: PduCount::Backfilled(validated!(0 - count)), + } + .into(); + + // Insert pdu + self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); + + drop(insert_lock); + + if pdu.kind == TimelineEventType::RoomMessage { + let content: ExtractBody = pdu.get_content()?; + if let Some(body) = content.body { + self.services.search.index_pdu(shortroomid, &pdu_id, &body); + } + } + drop(mutex_lock); + + debug!("Prepended backfill pdu"); + Ok(()) +} diff --git a/src/service/rooms/timeline/build.rs b/src/service/rooms/timeline/build.rs new file mode 100644 index 00000000..a522c531 --- /dev/null +++ b/src/service/rooms/timeline/build.rs @@ -0,0 +1,226 @@ +use std::{collections::HashSet, iter::once}; + +use conduwuit_core::{ + Err, Result, implement, + matrix::{event::Event, pdu::PduBuilder}, + utils::{IterStream, ReadyExt}, +}; +use futures::{FutureExt, StreamExt}; +use ruma::{ + OwnedEventId, OwnedServerName, RoomId, RoomVersionId, UserId, + events::{ + TimelineEventType, + room::{ + member::{MembershipState, RoomMemberEventContent}, + redaction::RoomRedactionEventContent, + }, + }, +}; + +use super::RoomMutexGuard; + +/// Creates a new persisted data unit and adds it to a room. This function +/// takes a roomid_mutex_state, meaning that only this function is able to +/// mutate the room state. 
+#[implement(super::Service)] +#[tracing::instrument(skip(self, state_lock), level = "debug")] +pub async fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + state_lock: &RoomMutexGuard, +) -> Result { + let (pdu, pdu_json) = self + .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) + .await?; + + if self.services.admin.is_admin_room(pdu.room_id()).await { + self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; + } + + // If redaction event is not authorized, do not append it to the timeline + if *pdu.kind() == TimelineEventType::RoomRedaction { + use RoomVersionId::*; + match self.services.state.get_room_version(pdu.room_id()).await? { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = pdu.redacts() { + if !self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? + { + return Err!(Request(Forbidden("User cannot redact this event."))); + } + } + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if !self + .services + .state_accessor + .user_can_redact(redact_id, pdu.sender(), pdu.room_id(), false) + .await? 
+ { + return Err!(Request(Forbidden("User cannot redact this event."))); + } + } + }, + } + } + + if *pdu.kind() == TimelineEventType::RoomMember { + let content: RoomMemberEventContent = pdu.get_content()?; + + if content.join_authorized_via_users_server.is_some() + && content.membership != MembershipState::Join + { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if content + .join_authorized_via_users_server + .as_ref() + .is_some_and(|authorising_user| { + !self.services.globals.user_is_local(authorising_user) + }) { + return Err!(Request(InvalidParam( + "Authorising user does not belong to this homeserver" + ))); + } + } + + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. + let statehashid = self.services.state.append_to_state(&pdu).await?; + + let pdu_id = self + .append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + once(pdu.event_id()), + state_lock, + ) + .boxed() + .await?; + + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + self.services + .state + .set_room_state(pdu.room_id(), statehashid, state_lock); + + let mut servers: HashSet = self + .services + .state_cache + .room_servers(pdu.room_id()) + .map(ToOwned::to_owned) + .collect() + .await; + + // In case we are kicking or banning a user, we need to inform their server of + // the change + if *pdu.kind() == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(state_key_uid.server_name().to_owned()); + } + } + + // Remove our server from the server list since it will be added to it by + // room_servers() and/or the if statement above + 
servers.remove(self.services.globals.server_name()); + + self.services + .sending + .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) + .await?; + + Ok(pdu.event_id().to_owned()) +} + +#[implement(super::Service)] +#[tracing::instrument(skip_all, level = "debug")] +async fn check_pdu_for_admin_room(&self, pdu: &Pdu, sender: &UserId) -> Result +where + Pdu: Event + Send + Sync, +{ + match pdu.kind() { + | TimelineEventType::RoomEncryption => { + return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); + }, + | TimelineEventType::RoomMember => { + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + + let server_user = &self.services.globals.server_user.to_string(); + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + | MembershipState::Leave => { + if target == server_user { + return Err!(Request(Forbidden(error!( + "Server user cannot leave the admins room." + )))); + } + + let count = self + .services + .state_cache + .room_members(pdu.room_id()) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!( + "Last admin cannot leave the admins room." + )))); + } + }, + + | MembershipState::Ban if pdu.state_key().is_some() => { + if target == server_user { + return Err!(Request(Forbidden(error!( + "Server cannot be banned from admins room." + )))); + } + + let count = self + .services + .state_cache + .room_members(pdu.room_id()) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!( + "Last admin cannot be banned from admins room." 
+ )))); + } + }, + | _ => {}, + } + }, + | _ => {}, + } + + Ok(()) +} diff --git a/src/service/rooms/timeline/create.rs b/src/service/rooms/timeline/create.rs new file mode 100644 index 00000000..d890e88e --- /dev/null +++ b/src/service/rooms/timeline/create.rs @@ -0,0 +1,214 @@ +use std::cmp; + +use conduwuit_core::{ + Err, Error, Result, err, implement, + matrix::{ + event::{Event, gen_event_id}, + pdu::{EventHash, PduBuilder, PduEvent}, + state_res::{self, RoomVersion}, + }, + utils::{self, IterStream, ReadyExt, stream::TryIgnore}, +}; +use futures::{StreamExt, TryStreamExt, future, future::ready}; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, RoomId, RoomVersionId, UserId, + canonical_json::to_canonical_value, + events::{StateEventType, TimelineEventType, room::create::RoomCreateEventContent}, + uint, +}; +use serde_json::value::to_raw_value; +use tracing::warn; + +use super::RoomMutexGuard; + +#[implement(super::Service)] +pub async fn create_hash_and_sign_event( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room + * state mutex */ +) -> Result<(PduEvent, CanonicalJsonObject)> { + let PduBuilder { + event_type, + content, + unsigned, + state_key, + redacts, + timestamp, + } = pdu_builder; + + let prev_events: Vec = self + .services + .state + .get_forward_extremities(room_id) + .take(20) + .map(Into::into) + .collect() + .await; + + // If there was no create event yet, assume we are creating a room + let room_version_id = self + .services + .state + .get_room_version(room_id) + .await + .or_else(|_| { + if event_type == TimelineEventType::RoomCreate { + let content: RoomCreateEventContent = serde_json::from_str(content.get())?; + Ok(content.room_version) + } else { + Err(Error::InconsistentRoomState( + "non-create event for room of unknown version", + room_id.to_owned(), + )) + } + })?; + + let room_version = 
RoomVersion::new(&room_version_id).expect("room version is supported"); + + let auth_events = self + .services + .state + .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) + .await?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .stream() + .map(Ok) + .and_then(|event_id| self.get_pdu(event_id)) + .and_then(|pdu| future::ok(pdu.depth)) + .ignore_err() + .ready_fold(uint!(0), cmp::max) + .await + .saturating_add(uint!(1)); + + let mut unsigned = unsigned.unwrap_or_default(); + + if let Some(state_key) = &state_key { + if let Ok(prev_pdu) = self + .services + .state_accessor + .room_state_get(room_id, &event_type.to_string().into(), state_key) + .await + { + unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); + unsigned.insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?); + unsigned + .insert("replaces_state".to_owned(), serde_json::to_value(prev_pdu.event_id())?); + } + } + + if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() { + return Err!(Request(Unknown("Event incorrectly had zero prev_events."))); + } + if state_key.is_none() && depth.lt(&uint!(2)) { + // The first two events in a room are always m.room.create and m.room.member, + // so any other events with that same depth are illegal. + warn!( + "Had unsafe depth {depth} when creating non-state event in {room_id}. 
Cowardly \ + aborting" + ); + return Err!(Request(Unknown("Unsafe depth for non-state event."))); + } + + let mut pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: room_id.to_owned(), + sender: sender.to_owned(), + origin: None, + origin_server_ts: timestamp.map_or_else( + || { + utils::millis_since_unix_epoch() + .try_into() + .expect("u64 fits into UInt") + }, + |ts| ts.get(), + ), + kind: event_type, + content, + state_key, + prev_events, + depth, + auth_events: auth_events + .values() + .map(|pdu| pdu.event_id.clone()) + .collect(), + redacts, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned)?) + }, + hashes: EventHash { sha256: "aaa".to_owned() }, + signatures: None, + }; + + let auth_fetch = |k: &StateEventType, s: &str| { + let key = (k.clone(), s.into()); + ready(auth_events.get(&key).map(ToOwned::to_owned)) + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None, // TODO: third_party_invite + auth_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; + + if !auth_check { + return Err!(Request(Forbidden("Event is not authorized."))); + } + + // Hash and sign + let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))) + })?; + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + pdu_json.remove("event_id"); + }, + } + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(self.services.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + if let Err(e) = self + .services + .server_keys + .hash_and_sign_event(&mut pdu_json, &room_version_id) + { + return match e { + | 
Error::Signatures(ruma::signatures::Error::PduSize) => { + Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) + }, + | _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), + }; + } + + // Generate event id + pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?; + + pdu_json.insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); + + // Generate short event id + let _shorteventid = self + .services + .short + .get_or_create_shorteventid(&pdu.event_id) + .await; + + Ok((pdu, pdu_json)) +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index a381fcf6..70c98a09 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,61 +1,34 @@ +mod append; +mod backfill; +mod build; +mod create; mod data; +mod redact; -use std::{ - borrow::Borrow, - cmp, - collections::{BTreeMap, HashSet}, - fmt::Write, - iter::once, - sync::Arc, -}; +use std::{fmt::Write, sync::Arc}; use async_trait::async_trait; -pub use conduwuit::matrix::pdu::{PduId, RawPduId}; -use conduwuit::{ - Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, +pub use conduwuit_core::matrix::pdu::{PduId, RawPduId}; +use conduwuit_core::{ + Result, Server, at, err, matrix::{ - event::{Event, gen_event_id}, - pdu::{EventHash, PduBuilder, PduCount, PduEvent}, - state_res::{self, RoomVersion}, + event::Event, + pdu::{PduCount, PduEvent}, }, - utils::{ - self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, - }, - validated, warn, -}; -use futures::{ - Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, + utils::{MutexMap, MutexMapGuard, future::TryExtExt, stream::TryIgnore}, + warn, }; +use futures::{Future, Stream, TryStreamExt, pin_mut}; use ruma::{ - CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - RoomId, RoomVersionId, ServerName, UserId, - 
api::federation, - canonical_json::to_canonical_value, - events::{ - GlobalAccountDataEventType, StateEventType, TimelineEventType, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - encrypted::Relation, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - redaction::RoomRedactionEventContent, - }, - }, - push::{Action, Ruleset, Tweak}, - uint, + CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomId, RoomId, UserId, + events::room::encrypted::Relation, }; use serde::Deserialize; -use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - Dep, account_data, admin, appservice, - appservice::NamespaceRegex, - globals, pusher, rooms, - rooms::{short::ShortRoomId, state_compressor::CompressedState}, - sending, server_keys, users, + Dep, account_data, admin, appservice, globals, pusher, rooms, sending, server_keys, users, }; // Update Relationships @@ -259,743 +232,6 @@ impl Service { self.db.replace_pdu(pdu_id, pdu_json).await } - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth - /// happens in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_pdu<'a, Leaves>( - &'a self, - pdu: &'a PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: Leaves, - state_lock: &'a RoomMutexGuard, - ) -> Result - where - Leaves: Iterator + Send + 'a, - { - // Coalesce database writes for the remainder of this scope. - let _cork = self.db.db.cork_and_flush(); - - let shortroomid = self - .services - .short - .get_shortroomid(&pdu.room_id) - .await - .map_err(|_| err!(Database("Room does not exist")))?; - - // Make unsigned fields correct. 
This is not properly documented in the spec, - // but state events need to have previous content in the unsigned field, so - // clients can easily interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) - { - if let Ok(shortstatehash) = self - .services - .state_accessor - .pdu_shortstatehash(&pdu.event_id) - .await - { - if let Ok(prev_state) = self - .services - .state_accessor - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .await - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.get_content_as_value()) - .map_err(|e| { - err!(Database(error!( - "Failed to convert prev_state to canonical JSON: {e}", - ))) - })?, - ), - ); - unsigned.insert( - String::from("prev_sender"), - CanonicalJsonValue::String(prev_state.sender().to_string()), - ); - unsigned.insert( - String::from("replaces_state"), - CanonicalJsonValue::String(prev_state.event_id().to_string()), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.services - .pdu_metadata - .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); - - self.services - .state - .set_forward_extremities(&pdu.room_id, leaves, state_lock) - .await; - - let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; - - let count1 = self.services.globals.next_count().unwrap(); - // Mark as read first so the sending client doesn't get a notification even if - // appending fails - self.services - .read_receipt - .private_read_set(&pdu.room_id, &pdu.sender, count1); - self.services - .user - .reset_notification_counts(&pdu.sender, &pdu.room_id); - - let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); - let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); - - // Insert pdu - self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; - - drop(insert_lock); - - // See if the event matches any known pushers via power level - let power_levels: RoomPowerLevelsEventContent = self - .services - .state_accessor - .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") - .await - .unwrap_or_default(); - - let mut push_target: HashSet<_> = self - .services - .state_cache - .active_local_users_in_room(&pdu.room_id) - .map(ToOwned::to_owned) - // Don't notify the sender of their own events, and dont send from ignored users - .ready_filter(|user| *user != pdu.sender) - .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) - .collect() - .await; - - let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); - let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); - - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key) = &pdu.state_key { - let target_user_id = UserId::parse(state_key)?; - - if self.services.users.is_active_local(target_user_id).await { - push_target.insert(target_user_id.to_owned()); - } - } - 
} - - let serialized = pdu.to_format(); - for user in &push_target { - let rules_for_user = self - .services - .account_data - .get_global(user, GlobalAccountDataEventType::PushRules) - .await - .map_or_else( - |_| Ruleset::server_default(user), - |ev: PushRulesEvent| ev.content.global, - ); - - let mut highlight = false; - let mut notify = false; - - for action in self - .services - .pusher - .get_actions(user, &rules_for_user, &power_levels, &serialized, &pdu.room_id) - .await - { - match action { - | Action::Notify => notify = true, - | Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - }, - | _ => {}, - } - - // Break early if both conditions are true - if notify && highlight { - break; - } - } - - if notify { - notifies.push(user.clone()); - } - - if highlight { - highlights.push(user.clone()); - } - - self.services - .pusher - .get_pushkeys(user) - .ready_for_each(|push_key| { - self.services - .sending - .send_pdu_push(&pdu_id, user, push_key.to_owned()) - .expect("TODO: replace with future"); - }) - .await; - } - - self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights); - - match pdu.kind { - | TimelineEventType::RoomRedaction => { - use RoomVersionId::*; - - let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; - match room_version_id { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &pdu.redacts { - if self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? - { - self.redact_pdu(redact_id, pdu, shortroomid).await?; - } - } - }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - if self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? 
- { - self.redact_pdu(redact_id, pdu, shortroomid).await?; - } - } - }, - } - }, - | TimelineEventType::SpaceChild => - if let Some(_state_key) = &pdu.state_key { - self.services - .spaces - .roomid_spacehierarchy_cache - .lock() - .await - .remove(&pdu.room_id); - }, - | TimelineEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - // if the state_key fails - let target_user_id = UserId::parse(state_key) - .expect("This state_key was previously validated"); - - let content: RoomMemberEventContent = pdu.get_content()?; - let stripped_state = match content.membership { - | MembershipState::Invite | MembershipState::Knock => - self.services.state.summary_stripped(pdu).await.into(), - | _ => None, - }; - - // Update our membership info, we do this here incase a user is invited or - // knocked and immediately leaves we need the DB to record the invite or - // knock event for auth - self.services - .state_cache - .update_membership( - &pdu.room_id, - target_user_id, - content, - &pdu.sender, - stripped_state, - None, - true, - ) - .await?; - } - }, - | TimelineEventType::RoomMessage => { - let content: ExtractBody = pdu.get_content()?; - if let Some(body) = content.body { - self.services.search.index_pdu(shortroomid, &pdu_id, &body); - - if self.services.admin.is_admin_command(pdu, &body).await { - self.services.admin.command_with_sender( - body, - Some((*pdu.event_id).into()), - pdu.sender.clone().into(), - )?; - } - } - }, - | _ => {}, - } - - if let Ok(content) = pdu.get_content::() { - if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { - self.services - .pdu_metadata - .add_relation(count2, related_pducount); - } - } - - if let Ok(content) = pdu.get_content::() { - match content.relates_to { - | Relation::Reply { in_reply_to } => { - // We need to do it again here, because replies don't have - // event_id as a top level field - if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await - { - 
self.services - .pdu_metadata - .add_relation(count2, related_pducount); - } - }, - | Relation::Thread(thread) => { - self.services - .threads - .add_to_thread(&thread.event_id, pdu) - .await?; - }, - | _ => {}, // TODO: Aggregate other types - } - } - - for appservice in self.services.appservice.read().await.values() { - if self - .services - .state_cache - .appservice_in_room(&pdu.room_id, appservice) - .await - { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - let appservice_uid = appservice.registration.sender_localpart.as_str(); - if state_key_uid == &appservice_uid { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; - continue; - } - } - } - - let matching_users = |users: &NamespaceRegex| { - appservice.users.is_match(pdu.sender.as_str()) - || pdu.kind == TimelineEventType::RoomMember - && pdu - .state_key - .as_ref() - .is_some_and(|state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: NamespaceRegex| { - self.services - .alias - .local_aliases_for_room(&pdu.room_id) - .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) - }; - - if matching_aliases(appservice.aliases.clone()).await - || appservice.rooms.is_match(pdu.room_id.as_str()) - || matching_users(&appservice.users) - { - self.services - .sending - .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; - } - } - - Ok(pdu_id) - } - - pub async fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the 
room - * state mutex */ - ) -> Result<(PduEvent, CanonicalJsonObject)> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - timestamp, - } = pdu_builder; - - let prev_events: Vec = self - .services - .state - .get_forward_extremities(room_id) - .take(20) - .map(Into::into) - .collect() - .await; - - // If there was no create event yet, assume we are creating a room - let room_version_id = self - .services - .state - .get_room_version(room_id) - .await - .or_else(|_| { - if event_type == TimelineEventType::RoomCreate { - let content: RoomCreateEventContent = serde_json::from_str(content.get())?; - Ok(content.room_version) - } else { - Err(Error::InconsistentRoomState( - "non-create event for room of unknown version", - room_id.to_owned(), - )) - } - })?; - - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = self - .services - .state - .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) - .await?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .stream() - .map(Ok) - .and_then(|event_id| self.get_pdu(event_id)) - .and_then(|pdu| future::ok(pdu.depth)) - .ignore_err() - .ready_fold(uint!(0), cmp::max) - .await - .saturating_add(uint!(1)); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Ok(prev_pdu) = self - .services - .state_accessor - .room_state_get(room_id, &event_type.to_string().into(), state_key) - .await - { - unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); - unsigned - .insert("prev_sender".to_owned(), serde_json::to_value(prev_pdu.sender())?); - unsigned.insert( - "replaces_state".to_owned(), - serde_json::to_value(prev_pdu.event_id())?, - ); - } - } - if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() { - return Err!(Request(Unknown("Event incorrectly had zero prev_events."))); - } - if 
state_key.is_none() && depth.lt(&uint!(2)) { - // The first two events in a room are always m.room.create and m.room.member, - // so any other events with that same depth are illegal. - warn!( - "Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \ - aborting" - ); - return Err!(Request(Unknown("Unsafe depth for non-state event."))); - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin: None, - origin_server_ts: timestamp.map_or_else( - || { - utils::millis_since_unix_epoch() - .try_into() - .expect("u64 fits into UInt") - }, - |ts| ts.get(), - ), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .values() - .map(|pdu| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned)?) - }, - hashes: EventHash { sha256: "aaa".to_owned() }, - signatures: None, - }; - - let auth_fetch = |k: &StateEventType, s: &str| { - let key = (k.clone(), s.into()); - ready(auth_events.get(&key).map(ToOwned::to_owned)) - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None, // TODO: third_party_invite - auth_fetch, - ) - .await - .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; - - if !auth_check { - return Err!(Request(Forbidden("Event is not authorized."))); - } - - // Hash and sign - let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { - err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))) - })?; - - // room v3 and above removed the "event_id" field from remote PDU format - match room_version_id { - | RoomVersionId::V1 | RoomVersionId::V2 => {}, - | _ => { - pdu_json.remove("event_id"); - }, - } - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - 
to_canonical_value(self.services.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - if let Err(e) = self - .services - .server_keys - .hash_and_sign_event(&mut pdu_json, &room_version_id) - { - return match e { - | Error::Signatures(ruma::signatures::Error::PduSize) => { - Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) - }, - | _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), - }; - } - - // Generate event id - pdu.event_id = gen_event_id(&pdu_json, &room_version_id)?; - - pdu_json - .insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); - - // Generate short event id - let _shorteventid = self - .services - .short - .get_or_create_shorteventid(&pdu.event_id) - .await; - - Ok((pdu, pdu_json)) - } - - /// Creates a new persisted data unit and adds it to a room. This function - /// takes a roomid_mutex_state, meaning that only this function is able to - /// mutate the room state. - #[tracing::instrument(skip(self, state_lock), level = "debug")] - pub async fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - state_lock: &RoomMutexGuard, - ) -> Result { - let (pdu, pdu_json) = self - .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) - .await?; - - if self.services.admin.is_admin_room(&pdu.room_id).await { - self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; - } - - // If redaction event is not authorized, do not append it to the timeline - if pdu.kind == TimelineEventType::RoomRedaction { - use RoomVersionId::*; - match self.services.state.get_room_version(&pdu.room_id).await? { - | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { - if let Some(redact_id) = &pdu.redacts { - if !self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? 
- { - return Err!(Request(Forbidden("User cannot redact this event."))); - } - } - }, - | _ => { - let content: RoomRedactionEventContent = pdu.get_content()?; - if let Some(redact_id) = &content.redacts { - if !self - .services - .state_accessor - .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) - .await? - { - return Err!(Request(Forbidden("User cannot redact this event."))); - } - } - }, - } - } - - if pdu.kind == TimelineEventType::RoomMember { - let content: RoomMemberEventContent = pdu.get_content()?; - - if content.join_authorized_via_users_server.is_some() - && content.membership != MembershipState::Join - { - return Err!(Request(BadJson( - "join_authorised_via_users_server is only for member joins" - ))); - } - - if content - .join_authorized_via_users_server - .as_ref() - .is_some_and(|authorising_user| { - !self.services.globals.user_is_local(authorising_user) - }) { - return Err!(Request(InvalidParam( - "Authorising user does not belong to this homeserver" - ))); - } - } - - // We append to state before appending the pdu, so we don't have a moment in - // time with the pdu without it's state. This is okay because append_pdu can't - // fail. 
- let statehashid = self.services.state.append_to_state(&pdu).await?; - - let pdu_id = self - .append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - once(pdu.event_id.borrow()), - state_lock, - ) - .boxed() - .await?; - - // We set the room state after inserting the pdu, so that we never have a moment - // in time where events in the current room state do not exist - self.services - .state - .set_room_state(&pdu.room_id, statehashid, state_lock); - - let mut servers: HashSet = self - .services - .state_cache - .room_servers(&pdu.room_id) - .map(ToOwned::to_owned) - .collect() - .await; - - // In case we are kicking or banning a user, we need to inform their server of - // the change - if pdu.kind == TimelineEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(state_key_uid.server_name().to_owned()); - } - } - - // Remove our server from the server list since it will be added to it by - // room_servers() and/or the if statement above - servers.remove(self.services.globals.server_name()); - - self.services - .sending - .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) - .await?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from - /// the server that sent the event. - #[tracing::instrument(level = "debug", skip_all)] - pub async fn append_incoming_pdu<'a, Leaves>( - &'a self, - pdu: &'a PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: Leaves, - state_ids_compressed: Arc, - soft_fail: bool, - state_lock: &'a RoomMutexGuard, - ) -> Result> - where - Leaves: Iterator + Send + 'a, - { - // We append to state before appending the pdu, so we don't have a moment in - // time with the pdu without it's state. This is okay because append_pdu can't - // fail. 
- self.services - .state - .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) - .await?; - - if soft_fail { - self.services - .pdu_metadata - .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); - - self.services - .state - .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) - .await; - - return Ok(None); - } - - let pdu_id = self - .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) - .await?; - - Ok(Some(pdu_id)) - } - /// Returns an iterator over all PDUs in a room. Unknown rooms produce no /// items. #[inline] @@ -1030,290 +266,4 @@ impl Service { self.db .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(name = "redact", level = "debug", skip(self))] - pub async fn redact_pdu( - &self, - event_id: &EventId, - reason: &Pdu, - shortroomid: ShortRoomId, - ) -> Result { - // TODO: Don't reserialize, keep original json - let Ok(pdu_id) = self.get_pdu_id(event_id).await else { - // If event does not exist, just noop - return Ok(()); - }; - - let mut pdu = self - .get_pdu_from_id(&pdu_id) - .await - .map(Event::into_pdu) - .map_err(|e| { - err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) - })?; - - if let Ok(content) = pdu.get_content::() { - if let Some(body) = content.body { - self.services - .search - .deindex_pdu(shortroomid, &pdu_id, &body); - } - } - - let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; - - pdu.redact(&room_version_id, reason.to_value())?; - - let obj = utils::to_canonical_object(&pdu).map_err(|e| { - err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))) - })?; - - self.replace_pdu(&pdu_id, &obj).await - } - - #[tracing::instrument(name = "backfill", level = "debug", skip(self))] - pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { - if self - .services - .state_cache - 
.room_joined_count(room_id) - .await - .is_ok_and(|count| count <= 1) - && !self - .services - .state_accessor - .is_world_readable(room_id) - .await - { - // Room is empty (1 user or none), there is no one that can backfill - return Ok(()); - } - - let first_pdu = self - .first_item_in_room(room_id) - .await - .expect("Room is not empty"); - - if first_pdu.0 < from { - // No backfill required, there are still events between them - return Ok(()); - } - - let power_levels: RoomPowerLevelsEventContent = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") - .await - .unwrap_or_default(); - - let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { - if level > &power_levels.users_default - && !self.services.globals.user_is_local(user_id) - { - Some(user_id.server_name()) - } else { - None - } - }); - - let canonical_room_alias_server = once( - self.services - .state_accessor - .get_canonical_alias(room_id) - .await, - ) - .filter_map(Result::ok) - .map(|alias| alias.server_name().to_owned()) - .stream(); - - let mut servers = room_mods - .stream() - .map(ToOwned::to_owned) - .chain(canonical_room_alias_server) - .chain( - self.services - .server - .config - .trusted_servers - .iter() - .map(ToOwned::to_owned) - .stream(), - ) - .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)) - .filter_map(|server_name| async move { - self.services - .state_cache - .server_in_room(&server_name, room_id) - .await - .then_some(server_name) - }) - .boxed(); - - while let Some(ref backfill_server) = servers.next().await { - info!("Asking {backfill_server} for backfill"); - let response = self - .services - .sending - .send_federation_request( - backfill_server, - federation::backfill::get_backfill::v1::Request { - room_id: room_id.to_owned(), - v: vec![first_pdu.1.event_id().to_owned()], - limit: uint!(100), - }, - ) - .await; - match response { - | Ok(response) => { - for pdu in 
response.pdus { - if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { - debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); - } - } - return Ok(()); - }, - | Err(e) => { - warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}"); - }, - } - } - - info!("No servers could backfill, but backfill was needed in room {room_id}"); - Ok(()) - } - - #[tracing::instrument(skip(self, pdu), level = "debug")] - pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { - let (room_id, event_id, value) = - self.services.event_handler.parse_incoming_pdu(&pdu).await?; - - // Lock so we cannot backfill the same pdu twice at the same time - let mutex_lock = self - .services - .event_handler - .mutex_federation - .lock(&room_id) - .await; - - // Skip the PDU if we already have it as a timeline event - if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { - debug!("We already know {event_id} at {pdu_id:?}"); - return Ok(()); - } - - self.services - .event_handler - .handle_incoming_pdu(origin, &room_id, &event_id, value, false) - .boxed() - .await?; - - let value = self.get_pdu_json(&event_id).await?; - - let pdu = self.get_pdu(&event_id).await?; - - let shortroomid = self.services.short.get_shortroomid(&room_id).await?; - - let insert_lock = self.mutex_insert.lock(&room_id).await; - - let count: i64 = self.services.globals.next_count().unwrap().try_into()?; - - let pdu_id: RawPduId = PduId { - shortroomid, - shorteventid: PduCount::Backfilled(validated!(0 - count)), - } - .into(); - - // Insert pdu - self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); - - drop(insert_lock); - - if pdu.kind == TimelineEventType::RoomMessage { - let content: ExtractBody = pdu.get_content()?; - if let Some(body) = content.body { - self.services.search.index_pdu(shortroomid, &pdu_id, &body); - } - } - drop(mutex_lock); - - debug!("Prepended backfill pdu"); - Ok(()) - } -} - -#[implement(Service)] 
-#[tracing::instrument(skip_all, level = "debug")] -async fn check_pdu_for_admin_room(&self, pdu: &Pdu, sender: &UserId) -> Result -where - Pdu: Event + Send + Sync, -{ - match pdu.kind() { - | TimelineEventType::RoomEncryption => { - return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); - }, - | TimelineEventType::RoomMember => { - let target = pdu - .state_key() - .filter(|v| v.starts_with('@')) - .unwrap_or(sender.as_str()); - - let server_user = &self.services.globals.server_user.to_string(); - - let content: RoomMemberEventContent = pdu.get_content()?; - match content.membership { - | MembershipState::Leave => { - if target == server_user { - return Err!(Request(Forbidden(error!( - "Server user cannot leave the admins room." - )))); - } - - let count = self - .services - .state_cache - .room_members(pdu.room_id()) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - .count() - .await; - - if count < 2 { - return Err!(Request(Forbidden(error!( - "Last admin cannot leave the admins room." - )))); - } - }, - - | MembershipState::Ban if pdu.state_key().is_some() => { - if target == server_user { - return Err!(Request(Forbidden(error!( - "Server cannot be banned from admins room." - )))); - } - - let count = self - .services - .state_cache - .room_members(pdu.room_id()) - .ready_filter(|user| self.services.globals.user_is_local(user)) - .ready_filter(|user| *user != target) - .boxed() - .count() - .await; - - if count < 2 { - return Err!(Request(Forbidden(error!( - "Last admin cannot be banned from admins room." 
- )))); - } - }, - | _ => {}, - } - }, - | _ => {}, - } - - Ok(()) } diff --git a/src/service/rooms/timeline/redact.rs b/src/service/rooms/timeline/redact.rs new file mode 100644 index 00000000..d51a8462 --- /dev/null +++ b/src/service/rooms/timeline/redact.rs @@ -0,0 +1,51 @@ +use conduwuit_core::{ + Result, err, implement, + matrix::event::Event, + utils::{self}, +}; +use ruma::EventId; + +use super::ExtractBody; +use crate::rooms::short::ShortRoomId; + +/// Replace a PDU with the redacted form. +#[implement(super::Service)] +#[tracing::instrument(name = "redact", level = "debug", skip(self))] +pub async fn redact_pdu( + &self, + event_id: &EventId, + reason: &Pdu, + shortroomid: ShortRoomId, +) -> Result { + // TODO: Don't reserialize, keep original json + let Ok(pdu_id) = self.get_pdu_id(event_id).await else { + // If event does not exist, just noop + return Ok(()); + }; + + let mut pdu = self + .get_pdu_from_id(&pdu_id) + .await + .map(Event::into_pdu) + .map_err(|e| { + err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) + })?; + + if let Ok(content) = pdu.get_content::() { + if let Some(body) = content.body { + self.services + .search + .deindex_pdu(shortroomid, &pdu_id, &body); + } + } + + let room_version_id = self.services.state.get_room_version(pdu.room_id()).await?; + + pdu.redact(&room_version_id, reason.to_value())?; + + let obj = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))) + })?; + + self.replace_pdu(&pdu_id, &obj).await +} From 56420a67ca8dad57923aa184de860ecbd871b9f1 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Apr 2025 06:55:54 +0000 Subject: [PATCH 082/270] Outdent state_compressor service. 
Signed-off-by: Jason Volk --- src/service/rooms/state_compressor/mod.rs | 678 +++++++++++----------- 1 file changed, 341 insertions(+), 337 deletions(-) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 56a91d0e..a33fb342 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -9,7 +9,7 @@ use async_trait::async_trait; use conduwuit::{ Result, arrayvec::ArrayVec, - at, checked, err, expected, utils, + at, checked, err, expected, implement, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, }; use database::Map; @@ -115,29 +115,30 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Returns a stack with info on shortstatehash, full state, added diff and - /// removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(name = "load", level = "debug", skip(self))] - pub async fn load_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - ) -> Result { - if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { - return Ok(r.clone()); - } - - let stack = self.new_shortstatehash_info(shortstatehash).await?; - - self.cache_shortstatehash_info(shortstatehash, stack.clone()) - .await?; - - Ok(stack) +/// Returns a stack with info on shortstatehash, full state, added diff and +/// removed diff for the selected shortstatehash and each parent layer. +#[implement(Service)] +#[tracing::instrument(name = "load", level = "debug", skip(self))] +pub async fn load_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, +) -> Result { + if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { + return Ok(r.clone()); } - /// Returns a stack with info on shortstatehash, full state, added diff and - /// removed diff for the selected shortstatehash and each parent layer. 
- #[tracing::instrument( + let stack = self.new_shortstatehash_info(shortstatehash).await?; + + self.cache_shortstatehash_info(shortstatehash, stack.clone()) + .await?; + + Ok(stack) +} + +/// Returns a stack with info on shortstatehash, full state, added diff and +/// removed diff for the selected shortstatehash and each parent layer. +#[implement(Service)] +#[tracing::instrument( name = "cache", level = "debug", skip_all, @@ -146,362 +147,365 @@ impl Service { stack = stack.len(), ), )] - async fn cache_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - stack: ShortStateInfoVec, - ) -> Result { - self.stateinfo_cache.lock()?.insert(shortstatehash, stack); +async fn cache_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, + stack: ShortStateInfoVec, +) -> Result { + self.stateinfo_cache.lock()?.insert(shortstatehash, stack); - Ok(()) - } + Ok(()) +} - async fn new_shortstatehash_info( - &self, - shortstatehash: ShortStateHash, - ) -> Result { - let StateDiff { parent, added, removed } = self.get_statediff(shortstatehash).await?; +#[implement(Service)] +async fn new_shortstatehash_info( + &self, + shortstatehash: ShortStateHash, +) -> Result { + let StateDiff { parent, added, removed } = self.get_statediff(shortstatehash).await?; - let Some(parent) = parent else { - return Ok(vec![ShortStateInfo { - shortstatehash, - full_state: added.clone(), - added, - removed, - }]); - }; - - let mut stack = Box::pin(self.load_shortstatehash_info(parent)).await?; - let top = stack.last().expect("at least one frame"); - - let mut full_state = (*top.full_state).clone(); - full_state.extend(added.iter().copied()); - - let removed = (*removed).clone(); - for r in &removed { - full_state.remove(r); - } - - stack.push(ShortStateInfo { + let Some(parent) = parent else { + return Ok(vec![ShortStateInfo { shortstatehash, + full_state: added.clone(), added, - removed: Arc::new(removed), - full_state: Arc::new(full_state), - }); + removed, + }]); + }; - 
Ok(stack) + let mut stack = Box::pin(self.load_shortstatehash_info(parent)).await?; + let top = stack.last().expect("at least one frame"); + + let mut full_state = (*top.full_state).clone(); + full_state.extend(added.iter().copied()); + + let removed = (*removed).clone(); + for r in &removed { + full_state.remove(r); } - pub fn compress_state_events<'a, I>( - &'a self, - state: I, - ) -> impl Stream + Send + 'a - where - I: Iterator + Clone + Debug + Send + 'a, - { - let event_ids = state.clone().map(at!(1)); + stack.push(ShortStateInfo { + shortstatehash, + added, + removed: Arc::new(removed), + full_state: Arc::new(full_state), + }); - let short_event_ids = self - .services - .short - .multi_get_or_create_shorteventid(event_ids); + Ok(stack) +} - state - .stream() - .map(at!(0)) - .zip(short_event_ids) - .map(|(shortstatekey, shorteventid)| { - compress_state_event(*shortstatekey, shorteventid) - }) - } +#[implement(Service)] +pub fn compress_state_events<'a, I>( + &'a self, + state: I, +) -> impl Stream + Send + 'a +where + I: Iterator + Clone + Debug + Send + 'a, +{ + let event_ids = state.clone().map(at!(1)); - pub async fn compress_state_event( - &self, - shortstatekey: ShortStateKey, - event_id: &EventId, - ) -> CompressedStateEvent { - let shorteventid = self - .services - .short - .get_or_create_shorteventid(event_id) - .await; + let short_event_ids = self + .services + .short + .multi_get_or_create_shorteventid(event_ids); - compress_state_event(shortstatekey, shorteventid) - } + state + .stream() + .map(at!(0)) + .zip(short_event_ids) + .map(|(shortstatekey, shorteventid)| compress_state_event(*shortstatekey, shorteventid)) +} - /// Creates a new shortstatehash that often is just a diff to an already - /// existing shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains - /// the full state. Layer 1 contains diffs to states of layer 0, layer 2 - /// diffs to layer 1 and so on. 
If layer n > 0 grows too big, it will be - /// combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively - /// fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is - /// shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time - /// for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, - /// added diff and removed diff for each parent layer - pub fn save_state_from_diff( - &self, - shortstatehash: ShortStateHash, - statediffnew: Arc, - statediffremoved: Arc, - diff_to_sibling: usize, - mut parent_states: ParentStatesVec, - ) -> Result { - let statediffnew_len = statediffnew.len(); - let statediffremoved_len = statediffremoved.len(); - let diffsum = checked!(statediffnew_len + statediffremoved_len)?; +#[implement(Service)] +pub async fn compress_state_event( + &self, + shortstatekey: ShortStateKey, + event_id: &EventId, +) -> CompressedStateEvent { + let shorteventid = self + .services + .short + .get_or_create_shorteventid(event_id) + .await; - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().expect("parent must have a state"); + compress_state_event(shortstatekey, shorteventid) +} - let mut parent_new = (*parent.added).clone(); - let mut parent_removed = (*parent.removed).clone(); - - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { - // It was not added in the parent and we removed it - parent_removed.insert(*removed); - } - // Else it was added in the parent and we removed it again. 
We - // can forget this change - } - - for new in statediffnew.iter() { - if !parent_removed.remove(new) { - // It was not touched in the parent and we added it - parent_new.insert(*new); - } - // Else it was removed in the parent and we added it again. We - // can forget this change - } - - self.save_state_from_diff( - shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - self.save_statediff(shortstatehash, &StateDiff { - parent: None, - added: statediffnew, - removed: statediffremoved, - }); - - return Ok(()); - } - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above +/// Creates a new shortstatehash that often is just a diff to an already +/// existing shortstatehash and therefore very efficient. +/// +/// There are multiple layers of diffs. The bottom layer 0 always contains +/// the full state. Layer 1 contains diffs to states of layer 0, layer 2 +/// diffs to layer 1 and so on. If layer n > 0 grows too big, it will be +/// combined with layer n-1 to create a new diff on layer n-1 that's +/// based on layer n-2. If that layer is also too big, it will recursively +/// fix above layers too. +/// +/// * `shortstatehash` - Shortstatehash of this state +/// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid +/// * `statediffremoved` - Removed from base. 
Each vec is +/// shortstatekey+shorteventid +/// * `diff_to_sibling` - Approximately how much the diff grows each time for +/// this layer +/// * `parent_states` - A stack with info on shortstatehash, full state, added +/// diff and removed diff for each parent layer +#[implement(Service)] +pub fn save_state_from_diff( + &self, + shortstatehash: ShortStateHash, + statediffnew: Arc, + statediffremoved: Arc, + diff_to_sibling: usize, + mut parent_states: ParentStatesVec, +) -> Result { + let statediffnew_len = statediffnew.len(); + let statediffremoved_len = statediffremoved.len(); + let diffsum = checked!(statediffnew_len + statediffremoved_len)?; + if parent_states.len() > 3 { + // Number of layers + // To many layers, we have to go deeper let parent = parent_states.pop().expect("parent must have a state"); - let parent_added_len = parent.added.len(); - let parent_removed_len = parent.removed.len(); - let parent_diff = checked!(parent_added_len + parent_removed_len)?; - if checked!(diffsum * diffsum)? >= checked!(2 * diff_to_sibling * parent_diff)? { - // Diff too big, we replace above layer(s) - let mut parent_new = (*parent.added).clone(); - let mut parent_removed = (*parent.removed).clone(); + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); - for removed in statediffremoved.iter() { - if !parent_new.remove(removed) { - // It was not added in the parent and we removed it - parent_removed.insert(*removed); - } - // Else it was added in the parent and we removed it again. We - // can forget this change + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { + // It was not added in the parent and we removed it + parent_removed.insert(*removed); } - - for new in statediffnew.iter() { - if !parent_removed.remove(new) { - // It was not touched in the parent and we added it - parent_new.insert(*new); - } - // Else it was removed in the parent and we added it again. 
We - // can forget this change - } - - self.save_state_from_diff( - shortstatehash, - Arc::new(parent_new), - Arc::new(parent_removed), - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - self.save_statediff(shortstatehash, &StateDiff { - parent: Some(parent.shortstatehash), - added: statediffnew, - removed: statediffremoved, - }); + // Else it was added in the parent and we removed it again. We + // can forget this change } - Ok(()) + for new in statediffnew.iter() { + if !parent_removed.remove(new) { + // It was not touched in the parent and we added it + parent_new.insert(*new); + } + // Else it was removed in the parent and we added it again. We + // can forget this change + } + + self.save_state_from_diff( + shortstatehash, + Arc::new(parent_new), + Arc::new(parent_removed), + diffsum, + parent_states, + )?; + + return Ok(()); } - /// Returns the new shortstatehash, and the state diff from the previous - /// room state - #[tracing::instrument(skip(self, new_state_ids_compressed), level = "debug")] - pub async fn save_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: Arc, - ) -> Result { - let previous_shortstatehash = self - .services - .state - .get_room_shortstatehash(room_id) - .await - .ok(); - - let state_hash = - utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); - - let (new_shortstatehash, already_existed) = self - .services - .short - .get_or_create_shortstatehash(&state_hash) - .await; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(HashSetCompressStateEvent { - shortstatehash: new_shortstatehash, - ..Default::default() - }); - } - - let states_parents = if let Some(p) = previous_shortstatehash { - self.load_shortstatehash_info(p).await.unwrap_or_default() - } else { - ShortStateInfoVec::new() - }; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: 
CompressedState = new_state_ids_compressed - .difference(&parent_stateinfo.full_state) - .copied() - .collect(); - - let statediffremoved: CompressedState = parent_stateinfo - .full_state - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (Arc::new(statediffnew), Arc::new(statediffremoved)) - } else { - (new_state_ids_compressed, Arc::new(CompressedState::new())) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved.clone(), - 2, // every state change is 2 event changes on average - states_parents, - )?; - } - - Ok(HashSetCompressStateEvent { - shortstatehash: new_shortstatehash, + if parent_states.is_empty() { + // There is no parent layer, create a new state + self.save_statediff(shortstatehash, &StateDiff { + parent: None, added: statediffnew, removed: statediffremoved, - }) + }); + + return Ok(()); } - #[tracing::instrument(skip(self), level = "debug", name = "get")] - async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result { - const BUFSIZE: usize = size_of::(); - const STRIDE: usize = size_of::(); + // Else we have two options. + // 1. We add the current diff on top of the parent layer. + // 2. We replace a layer above - let value = self - .db - .shortstatehash_statediff - .aqry::(&shortstatehash) - .await - .map_err(|e| { - err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")) - })?; + let parent = parent_states.pop().expect("parent must have a state"); + let parent_added_len = parent.added.len(); + let parent_removed_len = parent.removed.len(); + let parent_diff = checked!(parent_added_len + parent_removed_len)?; - let parent = utils::u64_from_bytes(&value[0..size_of::()]) - .ok() - .take_if(|parent| *parent != 0); + if checked!(diffsum * diffsum)? >= checked!(2 * diff_to_sibling * parent_diff)? 
{ + // Diff too big, we replace above layer(s) + let mut parent_new = (*parent.added).clone(); + let mut parent_removed = (*parent.removed).clone(); - debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); - let _num_values = value.len() / STRIDE; - - let mut add_mode = true; - let mut added = CompressedState::new(); - let mut removed = CompressedState::new(); - - let mut i = STRIDE; - while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i = expected!(i + STRIDE); - continue; + for removed in statediffremoved.iter() { + if !parent_new.remove(removed) { + // It was not added in the parent and we removed it + parent_removed.insert(*removed); } - if add_mode { - added.insert(v.try_into()?); - } else { - removed.insert(v.try_into()?); - } - i = expected!(i + 2 * STRIDE); + // Else it was added in the parent and we removed it again. We + // can forget this change } - Ok(StateDiff { - parent, - added: Arc::new(added), - removed: Arc::new(removed), - }) + for new in statediffnew.iter() { + if !parent_removed.remove(new) { + // It was not touched in the parent and we added it + parent_new.insert(*new); + } + // Else it was removed in the parent and we added it again. 
We + // can forget this change + } + + self.save_state_from_diff( + shortstatehash, + Arc::new(parent_new), + Arc::new(parent_removed), + diffsum, + parent_states, + )?; + } else { + // Diff small enough, we add diff as layer on top of parent + self.save_statediff(shortstatehash, &StateDiff { + parent: Some(parent.shortstatehash), + added: statediffnew, + removed: statediffremoved, + }); } - fn save_statediff(&self, shortstatehash: ShortStateHash, diff: &StateDiff) { - let mut value = Vec::::with_capacity( - 2_usize - .saturating_add(diff.added.len()) - .saturating_add(diff.removed.len()), - ); + Ok(()) +} - let parent = diff.parent.unwrap_or(0_u64); - value.extend_from_slice(&parent.to_be_bytes()); +/// Returns the new shortstatehash, and the state diff from the previous +/// room state +#[implement(Service)] +#[tracing::instrument(skip(self, new_state_ids_compressed), level = "debug")] +pub async fn save_state( + &self, + room_id: &RoomId, + new_state_ids_compressed: Arc, +) -> Result { + let previous_shortstatehash = self + .services + .state + .get_room_shortstatehash(room_id) + .await + .ok(); - for new in diff.added.iter() { - value.extend_from_slice(&new[..]); - } + let state_hash = + utils::calculate_hash(new_state_ids_compressed.iter().map(|bytes| &bytes[..])); - if !diff.removed.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in diff.removed.iter() { - value.extend_from_slice(&removed[..]); - } - } + let (new_shortstatehash, already_existed) = self + .services + .short + .get_or_create_shortstatehash(&state_hash) + .await; - self.db - .shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value); + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok(HashSetCompressStateEvent { + shortstatehash: new_shortstatehash, + ..Default::default() + }); } + + let states_parents = if let Some(p) = previous_shortstatehash { + self.load_shortstatehash_info(p).await.unwrap_or_default() + } else { + 
ShortStateInfoVec::new() + }; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: CompressedState = new_state_ids_compressed + .difference(&parent_stateinfo.full_state) + .copied() + .collect(); + + let statediffremoved: CompressedState = parent_stateinfo + .full_state + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (Arc::new(statediffnew), Arc::new(statediffremoved)) + } else { + (new_state_ids_compressed, Arc::new(CompressedState::new())) + }; + + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + statediffremoved.clone(), + 2, // every state change is 2 event changes on average + states_parents, + )?; + } + + Ok(HashSetCompressStateEvent { + shortstatehash: new_shortstatehash, + added: statediffnew, + removed: statediffremoved, + }) +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug", name = "get")] +async fn get_statediff(&self, shortstatehash: ShortStateHash) -> Result { + const BUFSIZE: usize = size_of::(); + const STRIDE: usize = size_of::(); + + let value = self + .db + .shortstatehash_statediff + .aqry::(&shortstatehash) + .await + .map_err(|e| { + err!(Database("Failed to find StateDiff from short {shortstatehash:?}: {e}")) + })?; + + let parent = utils::u64_from_bytes(&value[0..size_of::()]) + .ok() + .take_if(|parent| *parent != 0); + + debug_assert!(value.len() % STRIDE == 0, "value not aligned to stride"); + let _num_values = value.len() / STRIDE; + + let mut add_mode = true; + let mut added = CompressedState::new(); + let mut removed = CompressedState::new(); + + let mut i = STRIDE; + while let Some(v) = value.get(i..expected!(i + 2 * STRIDE)) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i = expected!(i + STRIDE); + continue; + } + if add_mode { + added.insert(v.try_into()?); + } else { + removed.insert(v.try_into()?); + } + i = expected!(i + 2 * STRIDE); 
+ } + + Ok(StateDiff { + parent, + added: Arc::new(added), + removed: Arc::new(removed), + }) +} + +#[implement(Service)] +fn save_statediff(&self, shortstatehash: ShortStateHash, diff: &StateDiff) { + let mut value = Vec::::with_capacity( + 2_usize + .saturating_add(diff.added.len()) + .saturating_add(diff.removed.len()), + ); + + let parent = diff.parent.unwrap_or(0_u64); + value.extend_from_slice(&parent.to_be_bytes()); + + for new in diff.added.iter() { + value.extend_from_slice(&new[..]); + } + + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in diff.removed.iter() { + value.extend_from_slice(&removed[..]); + } + } + + self.db + .shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value); } #[inline] From 36e81ba185dbd17aa73f9d01417ec5c37f7da4c5 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Tue, 29 Apr 2025 07:28:05 +0000 Subject: [PATCH 083/270] Split state_cache service. Signed-off-by: Jason Volk --- src/service/rooms/state_cache/mod.rs | 1308 ++++++++--------------- src/service/rooms/state_cache/update.rs | 369 +++++++ src/service/rooms/state_cache/via.rs | 92 ++ 3 files changed, 882 insertions(+), 887 deletions(-) create mode 100644 src/service/rooms/state_cache/update.rs create mode 100644 src/service/rooms/state_cache/via.rs diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index d3dbc143..9429be79 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,30 +1,22 @@ +mod update; +mod via; + use std::{ - collections::{HashMap, HashSet}, + collections::HashMap, sync::{Arc, RwLock}, }; use conduwuit::{ - Result, is_not_empty, + Result, implement, result::LogErr, - utils::{ReadyExt, StreamTools, stream::TryIgnore}, + utils::{ReadyExt, stream::TryIgnore}, warn, }; -use database::{Deserialized, Ignore, Interfix, Json, Map, serialize_key}; -use futures::{Stream, StreamExt, future::join5, pin_mut, stream::iter}; -use 
itertools::Itertools; +use database::{Deserialized, Ignore, Interfix, Map}; +use futures::{Stream, StreamExt, future::join5, pin_mut}; use ruma::{ - OwnedRoomId, OwnedServerName, RoomId, ServerName, UserId, - events::{ - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, - direct::DirectEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - }, - int, + OwnedRoomId, RoomId, ServerName, UserId, + events::{AnyStrippedStateEvent, AnySyncStateEvent, room::member::MembershipState}, serde::Raw, }; @@ -101,901 +93,443 @@ impl crate::Service for Service { fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } -impl Service { - /// Update current membership data. - #[tracing::instrument( - level = "debug", - skip_all, - fields( - %room_id, - %user_id, - %sender, - ?membership_event, - ), - )] - #[allow(clippy::too_many_arguments)] - pub async fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership_event: RoomMemberEventContent, - sender: &UserId, - last_state: Option>>, - invite_via: Option>, - update_joined_count: bool, - ) -> Result<()> { - let membership = membership_event.membership; - - // Keep track what remote users exist by adding them as "deactivated" users - // - // TODO: use futures to update remote profiles without blocking the membership - // update - #[allow(clippy::collapsible_if)] - if !self.services.globals.user_is_local(user_id) { - if !self.services.users.exists(user_id).await { - self.services.users.create(user_id, None)?; - } - - /* - // Try to update our local copy of the user if ours does not match - if ((self.services.users.displayname(user_id)? != membership_event.displayname) - || (self.services.users.avatar_url(user_id)? != membership_event.avatar_url) - || (self.services.users.blurhash(user_id)? 
!= membership_event.blurhash)) - && (membership != MembershipState::Leave) - { - let response = self.services - .sending - .send_federation_request( - user_id.server_name(), - federation::query::get_profile_information::v1::Request { - user_id: user_id.into(), - field: None, // we want the full user's profile to update locally too - }, - ) - .await; - - self.services.users.set_displayname(user_id, response.displayname.clone()).await?; - self.services.users.set_avatar_url(user_id, response.avatar_url).await?; - self.services.users.set_blurhash(user_id, response.blurhash).await?; - }; - */ - } - - match &membership { - | MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id).await { - // Add the user ID to the join list then - self.mark_as_once_joined(user_id, room_id); - - // Check if the room has a predecessor - if let Ok(Some(predecessor)) = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomCreate, "") - .await - .map(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. 
- // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Ok(tag_event) = self - .services - .account_data - .get_room( - &predecessor.room_id, - user_id, - RoomAccountDataEventType::Tag, - ) - .await - { - self.services - .account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - ) - .await - .ok(); - } - - // Copy direct chat flag - if let Ok(mut direct_event) = self - .services - .account_data - .get_global::( - user_id, - GlobalAccountDataEventType::Direct, - ) - .await - { - let mut room_ids_updated = false; - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - self.services - .account_data - .update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event) - .expect("to json always works"), - ) - .await?; - } - } - } - } - - self.mark_as_joined(user_id, room_id); - }, - | MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - if self.services.users.user_is_ignored(sender, user_id).await { - return Ok(()); - } - - self.mark_as_invited(user_id, room_id, last_state, invite_via) - .await; - }, - | MembershipState::Leave | MembershipState::Ban => { - self.mark_as_left(user_id, room_id); - - if self.services.globals.user_is_local(user_id) - && (self.services.config.forget_forced_upon_leave - || self.services.metadata.is_banned(room_id).await - || self.services.metadata.is_disabled(room_id).await) - { - self.forget(room_id, user_id); - } - }, - | _ => {}, - } - - if update_joined_count { - self.update_joined_count(room_id).await; - } - - Ok(()) +#[implement(Service)] +#[tracing::instrument(level = "trace", skip_all)] +pub async fn 
appservice_in_room(&self, room_id: &RoomId, appservice: &RegistrationInfo) -> bool { + if let Some(cached) = self + .appservice_in_room_cache + .read() + .expect("locked") + .get(room_id) + .and_then(|map| map.get(&appservice.registration.id)) + .copied() + { + return cached; } - #[tracing::instrument(level = "trace", skip_all)] - pub async fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &RegistrationInfo, - ) -> bool { - if let Some(cached) = self - .appservice_in_room_cache - .read() - .expect("locked") - .get(room_id) - .and_then(|map| map.get(&appservice.registration.id)) - .copied() - { - return cached; - } + let bridge_user_id = UserId::parse_with_server_name( + appservice.registration.sender_localpart.as_str(), + self.services.globals.server_name(), + ); - let bridge_user_id = UserId::parse_with_server_name( - appservice.registration.sender_localpart.as_str(), - self.services.globals.server_name(), - ); + let Ok(bridge_user_id) = bridge_user_id.log_err() else { + return false; + }; - let Ok(bridge_user_id) = bridge_user_id.log_err() else { - return false; - }; - - let in_room = self.is_joined(&bridge_user_id, room_id).await - || self - .room_members(room_id) - .ready_any(|user_id| appservice.users.is_match(user_id.as_str())) - .await; - - self.appservice_in_room_cache - .write() - .expect("locked") - .entry(room_id.into()) - .or_default() - .insert(appservice.registration.id.clone(), in_room); - - in_room - } - - /// Direct DB function to directly mark a user as joined. It is not - /// recommended to use this directly. 
You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - self.db.userroomid_joined.insert(&userroom_id, []); - self.db.roomuserid_joined.insert(&roomuser_id, []); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); - - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); - } - - /// Direct DB function to directly mark a user as left. It is not - /// recommended to use this directly. 
You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - // (timo) TODO - let leftstate = Vec::>::new(); - - self.db - .userroomid_leftstate - .raw_put(&userroom_id, Json(leftstate)); - self.db - .roomuserid_leftcount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); - - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); - } - - /// Direct DB function to directly mark a user as knocked. It is not - /// recommended to use this directly. 
You most likely should use - /// `update_membership` instead - #[tracing::instrument(skip(self), level = "debug")] - pub fn mark_as_knocked( - &self, - user_id: &UserId, - room_id: &RoomId, - knocked_state: Option>>, - ) { - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); - - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); - - self.db - .userroomid_knockedstate - .raw_put(&userroom_id, Json(knocked_state.unwrap_or_default())); - self.db - .roomuserid_knockedcount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); - - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); - - self.db.userroomid_invitestate.remove(&userroom_id); - self.db.roomuserid_invitecount.remove(&roomuser_id); - - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); - - self.db.roomid_inviteviaservers.remove(room_id); - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { - let userroom_id = (user_id, room_id); - let roomuser_id = (room_id, user_id); - - self.db.userroomid_leftstate.del(userroom_id); - self.db.roomuserid_leftcount.del(roomuser_id); - } - - /// Returns an iterator of all servers participating in this room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_servers<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomserverids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, server): (Ignore, &ServerName)| server) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn server_in_room<'a>( - &'a self, - server: &'a ServerName, - room_id: &'a RoomId, - ) -> bool { - let key = (server, room_id); - self.db.serverroomids.qry(&key).await.is_ok() - } - - /// Returns an iterator of all rooms a server participates in (as far as we - /// know). - #[tracing::instrument(skip(self), level = "debug")] - pub fn server_rooms<'a>( - &'a self, - server: &'a ServerName, - ) -> impl Stream + Send + 'a { - let prefix = (server, Interfix); - self.db - .serverroomids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, room_id): (Ignore, &RoomId)| room_id) - } - - /// Returns true if server can see user by sharing at least one room. - #[tracing::instrument(skip(self), level = "trace")] - pub async fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> bool { - self.server_rooms(server) - .any(|room_id| self.is_joined(user_id, room_id)) - .await - } - - /// Returns true if user_a and user_b share at least one room. 
- #[tracing::instrument(skip(self), level = "trace")] - pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { - let get_shared_rooms = self.get_shared_rooms(user_a, user_b); - - pin_mut!(get_shared_rooms); - get_shared_rooms.next().await.is_some() - } - - /// List the rooms common between two users - #[tracing::instrument(skip(self), level = "debug")] - pub fn get_shared_rooms<'a>( - &'a self, - user_a: &'a UserId, - user_b: &'a UserId, - ) -> impl Stream + Send + 'a { - use conduwuit::utils::set; - - let a = self.rooms_joined(user_a); - let b = self.rooms_joined(user_b); - set::intersection_sorted_stream2(a, b) - } - - /// Returns an iterator of all joined members of a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_joined - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) - } - - /// Returns the number of users which are currently in a room - #[tracing::instrument(skip(self), level = "trace")] - pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { - self.db.roomid_joinedcount.get(room_id).await.deserialized() - } - - #[tracing::instrument(skip(self), level = "debug")] - /// Returns an iterator of all our local users in the room, even if they're - /// deactivated/guests - pub fn local_users_in_room<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - self.room_members(room_id) - .ready_filter(|user| self.services.globals.user_is_local(user)) - } - - /// Returns an iterator of all our local joined users in a room who are - /// active (not deactivated, not guest) - #[tracing::instrument(skip(self), level = "trace")] - pub fn active_local_users_in_room<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - self.local_users_in_room(room_id) - .filter(|user| 
self.services.users.is_active(user)) - } - - /// Returns the number of users which are currently invited to a room - #[tracing::instrument(skip(self), level = "trace")] - pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { - self.db - .roomid_invitedcount - .get(room_id) - .await - .deserialized() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuseroncejoinedids - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_invited<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_invitecount - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) - } - - /// Returns an iterator over all knocked members of a room. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn room_members_knocked<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - let prefix = (room_id, Interfix); - self.db - .roomuserid_knockedcount - .keys_prefix(&prefix) - .ignore_err() - .map(|(_, user_id): (Ignore, &UserId)| user_id) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db - .roomuserid_invitecount - .qry(&key) - .await - .deserialized() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_knock_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db - .roomuserid_knockedcount - .qry(&key) - .await - .deserialized() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { - let key = (room_id, user_id); - self.db.roomuserid_leftcount.qry(&key).await.deserialized() - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_joined<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - self.db - .userroomid_joined - .keys_raw_prefix(user_id) - .ignore_err() - .map(|(_, room_id): (Ignore, &RoomId)| room_id) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_invited<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_invitestate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() - } - - /// Returns an iterator over all rooms a user is currently knocking. - #[tracing::instrument(skip(self), level = "trace")] - pub fn rooms_knocked<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_knockedstate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_invitestate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| { - val.deserialize_as().map_err(Into::into) - }) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn knock_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_knockedstate - .qry(&key) - .await - .deserialized() - .and_then(|val: Raw>| { - val.deserialize_as().map_err(Into::into) - }) - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>> { - let key = (user_id, room_id); - self.db - .userroomid_leftstate - .qry(&key) - .await - 
.deserialized() - .and_then(|val: Raw>| { - val.deserialize_as().map_err(Into::into) - }) - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self), level = "debug")] - pub fn rooms_left<'a>( - &'a self, - user_id: &'a UserId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Key<'a>, Raw>>); - type Key<'a> = (&'a UserId, &'a RoomId); - - let prefix = (user_id, Interfix); - self.db - .userroomid_leftstate - .stream_prefix(&prefix) - .ignore_err() - .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) - .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) - .ignore_err() - } - - #[tracing::instrument(skip(self), level = "debug")] - pub async fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.roomuseroncejoinedids.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_joined<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_joined.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_knocked<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_knockedstate.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_invitestate.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> bool { - let key = (user_id, room_id); - self.db.userroomid_leftstate.qry(&key).await.is_ok() - } - - #[tracing::instrument(skip(self), level = "trace")] - pub async fn user_membership( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Option { - let states = join5( - 
self.is_joined(user_id, room_id), - self.is_left(user_id, room_id), - self.is_knocked(user_id, room_id), - self.is_invited(user_id, room_id), - self.once_joined(user_id, room_id), - ) - .await; - - match states { - | (true, ..) => Some(MembershipState::Join), - | (_, true, ..) => Some(MembershipState::Leave), - | (_, _, true, ..) => Some(MembershipState::Knock), - | (_, _, _, true, ..) => Some(MembershipState::Invite), - | (false, false, false, false, true) => Some(MembershipState::Ban), - | _ => None, - } - } - - #[tracing::instrument(skip(self), level = "debug")] - pub fn servers_invite_via<'a>( - &'a self, - room_id: &'a RoomId, - ) -> impl Stream + Send + 'a { - type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); - - self.db - .roomid_inviteviaservers - .stream_raw_prefix(room_id) - .ignore_err() - .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) - } - - /// Gets up to five servers that are likely to be in the room in the - /// distant future. - /// - /// See - #[tracing::instrument(skip(self), level = "trace")] - pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { - let most_powerful_user_server = self - .services - .state_accessor - .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") - .await - .map(|content: RoomPowerLevelsEventContent| { - content - .users - .iter() - .max_by_key(|(_, power)| *power) - .and_then(|x| (x.1 >= &int!(50)).then_some(x)) - .map(|(user, _power)| user.server_name().to_owned()) - }); - - let mut servers: Vec = self + let in_room = self.is_joined(&bridge_user_id, room_id).await + || self .room_members(room_id) - .counts_by(|user| user.server_name().to_owned()) - .await - .into_iter() - .sorted_by_key(|(_, users)| *users) - .map(|(server, _)| server) - .rev() - .take(5) - .collect(); - - if let Ok(Some(server)) = most_powerful_user_server { - servers.insert(0, server); - servers.truncate(5); - } - - Ok(servers) - } - - pub fn get_appservice_in_room_cache_usage(&self) -> 
(usize, usize) { - let cache = self.appservice_in_room_cache.read().expect("locked"); - - (cache.len(), cache.capacity()) - } - - #[tracing::instrument(level = "debug", skip_all)] - pub fn clear_appservice_in_room_cache(&self) { - self.appservice_in_room_cache - .write() - .expect("locked") - .clear(); - } - - #[tracing::instrument(level = "debug", skip(self))] - pub async fn update_joined_count(&self, room_id: &RoomId) { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut knockedcount = 0_u64; - let mut joined_servers = HashSet::new(); - - self.room_members(room_id) - .ready_for_each(|joined| { - joined_servers.insert(joined.server_name().to_owned()); - joinedcount = joinedcount.saturating_add(1); - }) + .ready_any(|user_id| appservice.users.is_match(user_id.as_str())) .await; - invitedcount = invitedcount.saturating_add( - self.room_members_invited(room_id) - .count() - .await - .try_into() - .unwrap_or(0), - ); + self.appservice_in_room_cache + .write() + .expect("locked") + .entry(room_id.into()) + .or_default() + .insert(appservice.registration.id.clone(), in_room); - knockedcount = knockedcount.saturating_add( - self.room_members_knocked(room_id) - .count() - .await - .try_into() - .unwrap_or(0), - ); + in_room +} - self.db.roomid_joinedcount.raw_put(room_id, joinedcount); - self.db.roomid_invitedcount.raw_put(room_id, invitedcount); - self.db - .roomuserid_knockedcount - .raw_put(room_id, knockedcount); +#[implement(Service)] +pub fn get_appservice_in_room_cache_usage(&self) -> (usize, usize) { + let cache = self.appservice_in_room_cache.read().expect("locked"); - self.room_servers(room_id) - .ready_for_each(|old_joined_server| { - if joined_servers.remove(old_joined_server) { - return; - } + (cache.len(), cache.capacity()) +} - // Server not in room anymore - let roomserver_id = (room_id, old_joined_server); - let serverroom_id = (old_joined_server, room_id); +#[implement(Service)] +#[tracing::instrument(level = "debug", skip_all)] +pub 
fn clear_appservice_in_room_cache(&self) { + self.appservice_in_room_cache + .write() + .expect("locked") + .clear(); +} - self.db.roomserverids.del(roomserver_id); - self.db.serverroomids.del(serverroom_id); - }) - .await; +/// Returns an iterator of all servers participating in this room. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_servers<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomserverids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, server): (Ignore, &ServerName)| server) +} - // Now only new servers are in joined_servers anymore - for server in &joined_servers { - let roomserver_id = (room_id, server); - let serverroom_id = (server, room_id); +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn server_in_room<'a>(&'a self, server: &'a ServerName, room_id: &'a RoomId) -> bool { + let key = (server, room_id); + self.db.serverroomids.qry(&key).await.is_ok() +} - self.db.roomserverids.put_raw(roomserver_id, []); - self.db.serverroomids.put_raw(serverroom_id, []); - } +/// Returns an iterator of all rooms a server participates in (as far as we +/// know). +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn server_rooms<'a>( + &'a self, + server: &'a ServerName, +) -> impl Stream + Send + 'a { + let prefix = (server, Interfix); + self.db + .serverroomids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, room_id): (Ignore, &RoomId)| room_id) +} - self.appservice_in_room_cache - .write() - .expect("locked") - .remove(room_id); - } +/// Returns true if server can see user by sharing at least one room. 
+#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn server_sees_user(&self, server: &ServerName, user_id: &UserId) -> bool { + self.server_rooms(server) + .any(|room_id| self.is_joined(user_id, room_id)) + .await +} - #[tracing::instrument(level = "debug", skip(self))] - fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { - let key = (user_id, room_id); - self.db.roomuseroncejoinedids.put_raw(key, []); - } +/// Returns true if user_a and user_b share at least one room. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn user_sees_user(&self, user_a: &UserId, user_b: &UserId) -> bool { + let get_shared_rooms = self.get_shared_rooms(user_a, user_b); - #[tracing::instrument(level = "debug", skip(self, last_state, invite_via))] - pub async fn mark_as_invited( - &self, - user_id: &UserId, - room_id: &RoomId, - last_state: Option>>, - invite_via: Option>, - ) { - let roomuser_id = (room_id, user_id); - let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + pin_mut!(get_shared_rooms); + get_shared_rooms.next().await.is_some() +} - let userroom_id = (user_id, room_id); - let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); +/// List the rooms common between two users +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn get_shared_rooms<'a>( + &'a self, + user_a: &'a UserId, + user_b: &'a UserId, +) -> impl Stream + Send + 'a { + use conduwuit::utils::set; - self.db - .userroomid_invitestate - .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); - self.db - .roomuserid_invitecount - .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + let a = self.rooms_joined(user_a); + let b = self.rooms_joined(user_b); + set::intersection_sorted_stream2(a, b) +} - self.db.userroomid_joined.remove(&userroom_id); - self.db.roomuserid_joined.remove(&roomuser_id); 
+/// Returns an iterator of all joined members of a room. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_members<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_joined + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) +} - self.db.userroomid_leftstate.remove(&userroom_id); - self.db.roomuserid_leftcount.remove(&roomuser_id); +/// Returns the number of users which are currently in a room +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn room_joined_count(&self, room_id: &RoomId) -> Result { + self.db.roomid_joinedcount.get(room_id).await.deserialized() +} - self.db.userroomid_knockedstate.remove(&userroom_id); - self.db.roomuserid_knockedcount.remove(&roomuser_id); +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +/// Returns an iterator of all our local users in the room, even if they're +/// deactivated/guests +pub fn local_users_in_room<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + self.room_members(room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) +} - if let Some(servers) = invite_via.filter(is_not_empty!()) { - self.add_servers_invite_via(room_id, servers).await; - } - } +/// Returns an iterator of all our local joined users in a room who are +/// active (not deactivated, not guest) +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub fn active_local_users_in_room<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + self.local_users_in_room(room_id) + .filter(|user| self.services.users.is_active(user)) +} - #[tracing::instrument(level = "debug", skip(self, servers))] - pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: Vec) { - let mut servers: Vec<_> = self - .servers_invite_via(room_id) - 
.map(ToOwned::to_owned) - .chain(iter(servers.into_iter())) - .collect() - .await; +/// Returns the number of users which are currently invited to a room +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn room_invited_count(&self, room_id: &RoomId) -> Result { + self.db + .roomid_invitedcount + .get(room_id) + .await + .deserialized() +} - servers.sort_unstable(); - servers.dedup(); +/// Returns an iterator over all User IDs who ever joined a room. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_useroncejoined<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuseroncejoinedids + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) +} - let servers = servers - .iter() - .map(|server| server.as_bytes()) - .collect_vec() - .join(&[0xFF][..]); +/// Returns an iterator over all invited members of a room. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_members_invited<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_invitecount + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) +} - self.db - .roomid_inviteviaservers - .insert(room_id.as_bytes(), &servers); +/// Returns an iterator over all knocked members of a room. 
+#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn room_members_knocked<'a>( + &'a self, + room_id: &'a RoomId, +) -> impl Stream + Send + 'a { + let prefix = (room_id, Interfix); + self.db + .roomuserid_knockedcount + .keys_prefix(&prefix) + .ignore_err() + .map(|(_, user_id): (Ignore, &UserId)| user_id) +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db + .roomuserid_invitecount + .qry(&key) + .await + .deserialized() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn get_knock_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db + .roomuserid_knockedcount + .qry(&key) + .await + .deserialized() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result { + let key = (room_id, user_id); + self.db.roomuserid_leftcount.qry(&key).await.deserialized() +} + +/// Returns an iterator over all rooms this user joined. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn rooms_joined<'a>( + &'a self, + user_id: &'a UserId, +) -> impl Stream + Send + 'a { + self.db + .userroomid_joined + .keys_raw_prefix(user_id) + .ignore_err() + .map(|(_, room_id): (Ignore, &RoomId)| room_id) +} + +/// Returns an iterator over all rooms a user was invited to. 
+#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn rooms_invited<'a>( + &'a self, + user_id: &'a UserId, +) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_invitestate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() +} + +/// Returns an iterator over all rooms a user is currently knocking. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub fn rooms_knocked<'a>( + &'a self, + user_id: &'a UserId, +) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_knockedstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, +) -> Result>> { + let key = (user_id, room_id); + self.db + .userroomid_invitestate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn knock_state( + &self, + user_id: &UserId, + room_id: &RoomId, +) -> Result>> { + let key = (user_id, room_id); + self.db + .userroomid_knockedstate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, +) -> Result>> { + let key = 
(user_id, room_id); + self.db + .userroomid_leftstate + .qry(&key) + .await + .deserialized() + .and_then(|val: Raw>| val.deserialize_as().map_err(Into::into)) +} + +/// Returns an iterator over all rooms a user left. +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn rooms_left<'a>( + &'a self, + user_id: &'a UserId, +) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Key<'a>, Raw>>); + type Key<'a> = (&'a UserId, &'a RoomId); + + let prefix = (user_id, Interfix); + self.db + .userroomid_leftstate + .stream_prefix(&prefix) + .ignore_err() + .map(|((_, room_id), state): KeyVal<'_>| (room_id.to_owned(), state)) + .map(|(room_id, state)| Ok((room_id, state.deserialize_as()?))) + .ignore_err() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn user_membership( + &self, + user_id: &UserId, + room_id: &RoomId, +) -> Option { + let states = join5( + self.is_joined(user_id, room_id), + self.is_left(user_id, room_id), + self.is_knocked(user_id, room_id), + self.is_invited(user_id, room_id), + self.once_joined(user_id, room_id), + ) + .await; + + match states { + | (true, ..) => Some(MembershipState::Join), + | (_, true, ..) => Some(MembershipState::Leave), + | (_, _, true, ..) => Some(MembershipState::Knock), + | (_, _, _, true, ..) 
=> Some(MembershipState::Invite), + | (false, false, false, false, true) => Some(MembershipState::Ban), + | _ => None, } } + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub async fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.roomuseroncejoinedids.qry(&key).await.is_ok() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn is_joined<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_joined.qry(&key).await.is_ok() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn is_knocked<'a>(&'a self, user_id: &'a UserId, room_id: &'a RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_knockedstate.qry(&key).await.is_ok() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_invitestate.qry(&key).await.is_ok() +} + +#[implement(Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> bool { + let key = (user_id, room_id); + self.db.userroomid_leftstate.qry(&key).await.is_ok() +} diff --git a/src/service/rooms/state_cache/update.rs b/src/service/rooms/state_cache/update.rs new file mode 100644 index 00000000..02c6bec6 --- /dev/null +++ b/src/service/rooms/state_cache/update.rs @@ -0,0 +1,369 @@ +use std::collections::HashSet; + +use conduwuit::{Result, implement, is_not_empty, utils::ReadyExt, warn}; +use database::{Json, serialize_key}; +use futures::StreamExt; +use ruma::{ + OwnedServerName, RoomId, UserId, + events::{ + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, + direct::DirectEvent, + room::{ + 
create::RoomCreateEventContent, + member::{MembershipState, RoomMemberEventContent}, + }, + }, + serde::Raw, +}; + +/// Update current membership data. +#[implement(super::Service)] +#[tracing::instrument( + level = "debug", + skip_all, + fields( + %room_id, + %user_id, + %sender, + ?membership_event, + ), + )] +#[allow(clippy::too_many_arguments)] +pub async fn update_membership( + &self, + room_id: &RoomId, + user_id: &UserId, + membership_event: RoomMemberEventContent, + sender: &UserId, + last_state: Option>>, + invite_via: Option>, + update_joined_count: bool, +) -> Result { + let membership = membership_event.membership; + + // Keep track what remote users exist by adding them as "deactivated" users + // + // TODO: use futures to update remote profiles without blocking the membership + // update + #[allow(clippy::collapsible_if)] + if !self.services.globals.user_is_local(user_id) { + if !self.services.users.exists(user_id).await { + self.services.users.create(user_id, None)?; + } + } + + match &membership { + | MembershipState::Join => { + // Check if the user never joined this room + if !self.once_joined(user_id, room_id).await { + // Add the user ID to the join list then + self.mark_as_once_joined(user_id, room_id); + + // Check if the room has a predecessor + if let Ok(Some(predecessor)) = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomCreate, "") + .await + .map(|content: RoomCreateEventContent| content.predecessor) + { + // Copy old tags to new room + if let Ok(tag_event) = self + .services + .account_data + .get_room(&predecessor.room_id, user_id, RoomAccountDataEventType::Tag) + .await + { + self.services + .account_data + .update( + Some(room_id), + user_id, + RoomAccountDataEventType::Tag, + &tag_event, + ) + .await + .ok(); + } + + // Copy direct chat flag + if let Ok(mut direct_event) = self + .services + .account_data + .get_global::(user_id, GlobalAccountDataEventType::Direct) + .await + { + let mut 
room_ids_updated = false; + for room_ids in direct_event.content.0.values_mut() { + if room_ids.iter().any(|r| r == &predecessor.room_id) { + room_ids.push(room_id.to_owned()); + room_ids_updated = true; + } + } + + if room_ids_updated { + self.services + .account_data + .update( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + &serde_json::to_value(&direct_event) + .expect("to json always works"), + ) + .await?; + } + } + } + } + + self.mark_as_joined(user_id, room_id); + }, + | MembershipState::Invite => { + // We want to know if the sender is ignored by the receiver + if self.services.users.user_is_ignored(sender, user_id).await { + return Ok(()); + } + + self.mark_as_invited(user_id, room_id, last_state, invite_via) + .await; + }, + | MembershipState::Leave | MembershipState::Ban => { + self.mark_as_left(user_id, room_id); + + if self.services.globals.user_is_local(user_id) + && (self.services.config.forget_forced_upon_leave + || self.services.metadata.is_banned(room_id).await + || self.services.metadata.is_disabled(room_id).await) + { + self.forget(room_id, user_id); + } + }, + | _ => {}, + } + + if update_joined_count { + self.update_joined_count(room_id).await; + } + + Ok(()) +} + +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip(self))] +pub async fn update_joined_count(&self, room_id: &RoomId) { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut knockedcount = 0_u64; + let mut joined_servers = HashSet::new(); + + self.room_members(room_id) + .ready_for_each(|joined| { + joined_servers.insert(joined.server_name().to_owned()); + joinedcount = joinedcount.saturating_add(1); + }) + .await; + + invitedcount = invitedcount.saturating_add( + self.room_members_invited(room_id) + .count() + .await + .try_into() + .unwrap_or(0), + ); + + knockedcount = knockedcount.saturating_add( + self.room_members_knocked(room_id) + .count() + .await + .try_into() + .unwrap_or(0), + ); + + 
self.db.roomid_joinedcount.raw_put(room_id, joinedcount); + self.db.roomid_invitedcount.raw_put(room_id, invitedcount); + self.db + .roomuserid_knockedcount + .raw_put(room_id, knockedcount); + + self.room_servers(room_id) + .ready_for_each(|old_joined_server| { + if joined_servers.remove(old_joined_server) { + return; + } + + // Server not in room anymore + let roomserver_id = (room_id, old_joined_server); + let serverroom_id = (old_joined_server, room_id); + + self.db.roomserverids.del(roomserver_id); + self.db.serverroomids.del(serverroom_id); + }) + .await; + + // Now only new servers are in joined_servers anymore + for server in &joined_servers { + let roomserver_id = (room_id, server); + let serverroom_id = (server, room_id); + + self.db.roomserverids.put_raw(roomserver_id, []); + self.db.serverroomids.put_raw(serverroom_id, []); + } + + self.appservice_in_room_cache + .write() + .expect("locked") + .remove(room_id); +} + +/// Direct DB function to directly mark a user as joined. It is not +/// recommended to use this directly. 
You most likely should use +/// `update_membership` instead +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + self.db.userroomid_joined.insert(&userroom_id, []); + self.db.roomuserid_joined.insert(&roomuser_id, []); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); +} + +/// Direct DB function to directly mark a user as left. It is not +/// recommended to use this directly. 
You most likely should use +/// `update_membership` instead +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + // (timo) TODO + let leftstate = Vec::>::new(); + + self.db + .userroomid_leftstate + .raw_put(&userroom_id, Json(leftstate)); + self.db + .roomuserid_leftcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); +} + +/// Direct DB function to directly mark a user as knocked. It is not +/// recommended to use this directly. 
You most likely should use +/// `update_membership` instead +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn mark_as_knocked( + &self, + user_id: &UserId, + room_id: &RoomId, + knocked_state: Option>>, +) { + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + self.db + .userroomid_knockedstate + .raw_put(&userroom_id, Json(knocked_state.unwrap_or_default())); + self.db + .roomuserid_knockedcount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_invitestate.remove(&userroom_id); + self.db.roomuserid_invitecount.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + + self.db.roomid_inviteviaservers.remove(room_id); +} + +/// Makes a user forget a room. 
+#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn forget(&self, room_id: &RoomId, user_id: &UserId) { + let userroom_id = (user_id, room_id); + let roomuser_id = (room_id, user_id); + + self.db.userroomid_leftstate.del(userroom_id); + self.db.roomuserid_leftcount.del(roomuser_id); +} + +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip(self))] +fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) { + let key = (user_id, room_id); + self.db.roomuseroncejoinedids.put_raw(key, []); +} + +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip(self, last_state, invite_via))] +pub async fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + invite_via: Option>, +) { + let roomuser_id = (room_id, user_id); + let roomuser_id = serialize_key(roomuser_id).expect("failed to serialize roomuser_id"); + + let userroom_id = (user_id, room_id); + let userroom_id = serialize_key(userroom_id).expect("failed to serialize userroom_id"); + + self.db + .userroomid_invitestate + .raw_put(&userroom_id, Json(last_state.unwrap_or_default())); + self.db + .roomuserid_invitecount + .raw_aput::<8, _, _>(&roomuser_id, self.services.globals.next_count().unwrap()); + + self.db.userroomid_joined.remove(&userroom_id); + self.db.roomuserid_joined.remove(&roomuser_id); + + self.db.userroomid_leftstate.remove(&userroom_id); + self.db.roomuserid_leftcount.remove(&roomuser_id); + + self.db.userroomid_knockedstate.remove(&userroom_id); + self.db.roomuserid_knockedcount.remove(&roomuser_id); + + if let Some(servers) = invite_via.filter(is_not_empty!()) { + self.add_servers_invite_via(room_id, servers).await; + } +} diff --git a/src/service/rooms/state_cache/via.rs b/src/service/rooms/state_cache/via.rs new file mode 100644 index 00000000..a818cc04 --- /dev/null +++ b/src/service/rooms/state_cache/via.rs @@ -0,0 +1,92 @@ +use conduwuit::{ + Result, implement, + 
utils::{StreamTools, stream::TryIgnore}, + warn, +}; +use database::Ignore; +use futures::{Stream, StreamExt, stream::iter}; +use itertools::Itertools; +use ruma::{ + OwnedServerName, RoomId, ServerName, + events::{StateEventType, room::power_levels::RoomPowerLevelsEventContent}, + int, +}; + +#[implement(super::Service)] +#[tracing::instrument(level = "debug", skip(self, servers))] +pub async fn add_servers_invite_via(&self, room_id: &RoomId, servers: Vec) { + let mut servers: Vec<_> = self + .servers_invite_via(room_id) + .map(ToOwned::to_owned) + .chain(iter(servers.into_iter())) + .collect() + .await; + + servers.sort_unstable(); + servers.dedup(); + + let servers = servers + .iter() + .map(|server| server.as_bytes()) + .collect_vec() + .join(&[0xFF][..]); + + self.db + .roomid_inviteviaservers + .insert(room_id.as_bytes(), &servers); +} + +/// Gets up to five servers that are likely to be in the room in the +/// distant future. +/// +/// See +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "trace")] +pub async fn servers_route_via(&self, room_id: &RoomId) -> Result> { + let most_powerful_user_server = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .map(|content: RoomPowerLevelsEventContent| { + content + .users + .iter() + .max_by_key(|(_, power)| *power) + .and_then(|x| (x.1 >= &int!(50)).then_some(x)) + .map(|(user, _power)| user.server_name().to_owned()) + }); + + let mut servers: Vec = self + .room_members(room_id) + .counts_by(|user| user.server_name().to_owned()) + .await + .into_iter() + .sorted_by_key(|(_, users)| *users) + .map(|(server, _)| server) + .rev() + .take(5) + .collect(); + + if let Ok(Some(server)) = most_powerful_user_server { + servers.insert(0, server); + servers.truncate(5); + } + + Ok(servers) +} + +#[implement(super::Service)] +#[tracing::instrument(skip(self), level = "debug")] +pub fn servers_invite_via<'a>( + &'a self, + room_id: &'a 
RoomId, +) -> impl Stream + Send + 'a { + type KeyVal<'a> = (Ignore, Vec<&'a ServerName>); + + self.db + .roomid_inviteviaservers + .stream_raw_prefix(room_id) + .ignore_err() + .map(|(_, servers): KeyVal<'_>| *servers.last().expect("at least one server")) +} From 3c7c641d2d5ff03cd1262675490f79c1aa5b858f Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 14 May 2025 00:33:31 +0000 Subject: [PATCH 084/270] Add revoke_admin to service. Signed-off-by: Jason Volk --- src/service/admin/grant.rs | 53 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/src/service/admin/grant.rs b/src/service/admin/grant.rs index 2d90ea52..0d0e3fc1 100644 --- a/src/service/admin/grant.rs +++ b/src/service/admin/grant.rs @@ -170,3 +170,56 @@ async fn set_room_tag(&self, room_id: &RoomId, user_id: &UserId, tag: &str) -> R ) .await } + +/// Demote an admin, removing its rights. +#[implement(super::Service)] +pub async fn revoke_admin(&self, user_id: &UserId) -> Result { + use MembershipState::{Invite, Join, Knock, Leave}; + + let Ok(room_id) = self.get_admin_room().await else { + return Err!(error!("No admin room available or created.")); + }; + + let state_lock = self.services.state.mutex.lock(&room_id).await; + + let event = match self + .services + .state_accessor + .get_member(&room_id, user_id) + .await + { + | Err(e) if e.is_not_found() => return Err!("{user_id} was never an admin."), + + | Err(e) => return Err!(error!(?e, "Failure occurred while attempting revoke.")), + + | Ok(event) if !matches!(event.membership, Invite | Knock | Join) => + return Err!("Cannot revoke {user_id} in membership state {:?}.", event.membership), + + | Ok(event) => { + assert!( + matches!(event.membership, Invite | Knock | Join), + "Incorrect membership state to remove user." 
+ ); + + event + }, + }; + + self.services + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + membership: Leave, + reason: Some("Admin Revoked".into()), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..event + }), + self.services.globals.server_user.as_ref(), + &room_id, + &state_lock, + ) + .await + .map(|_| ()) +} From 143cb55ac86d386e7d228a8e4475ad121b906083 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Wed, 21 May 2025 23:06:27 +0000 Subject: [PATCH 085/270] Fix clippy::unnecessary-unwrap. Signed-off-by: Jason Volk --- src/service/migrations.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/service/migrations.rs b/src/service/migrations.rs index 512a7867..cee638ba 100644 --- a/src/service/migrations.rs +++ b/src/service/migrations.rs @@ -242,12 +242,14 @@ async fn db_lt_12(services: &Services) -> Result<()> { [".m.rules.contains_user_name", ".m.rule.contains_user_name"]; let rule = rules_list.content.get(content_rule_transformation[0]); - if rule.is_some() { - let mut rule = rule.unwrap().clone(); + + if let Some(rule) = rule { + let mut rule = rule.clone(); content_rule_transformation[1].clone_into(&mut rule.rule_id); rules_list .content .shift_remove(content_rule_transformation[0]); + rules_list.content.insert(rule); } } From 293e7243b3c08aaed71b89a16544a5e75b9105dc Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 2 Jul 2025 19:32:50 +0100 Subject: [PATCH 086/270] style: Fix formatting/clippy issues --- src/api/client/account.rs | 19 +++++++++++++++---- src/api/client/membership/join.rs | 2 +- src/api/client/report.rs | 1 - src/service/rooms/timeline/append.rs | 8 +++++--- src/service/rooms/timeline/create.rs | 2 +- 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/src/api/client/account.rs b/src/api/client/account.rs index df938c17..12801e7d 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs 
@@ -12,14 +12,25 @@ use conduwuit_service::Services; use futures::{FutureExt, StreamExt}; use register::RegistrationKind; use ruma::{ + OwnedRoomId, UserId, api::client::{ account::{ - change_password, check_registration_token_validity, deactivate, get_3pids, get_username_availability, register::{self, LoginType}, request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, whoami, ThirdPartyIdRemovalStatus + ThirdPartyIdRemovalStatus, change_password, check_registration_token_validity, + deactivate, get_3pids, get_username_availability, + register::{self, LoginType}, + request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn, + whoami, }, uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, events::{ - room::{message::RoomMessageEventContent, power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}}, GlobalAccountDataEventType, StateEventType - }, push, OwnedRoomId, UserId + }, + events::{ + GlobalAccountDataEventType, StateEventType, + room::{ + message::RoomMessageEventContent, + power_levels::{RoomPowerLevels, RoomPowerLevelsEventContent}, + }, + }, + push, }; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH, join_room_by_id_helper}; diff --git a/src/api/client/membership/join.rs b/src/api/client/membership/join.rs index 9d19d3bc..dc170cbf 100644 --- a/src/api/client/membership/join.rs +++ b/src/api/client/membership/join.rs @@ -139,7 +139,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( let sender_user = body.sender_user(); let appservice_info = &body.appservice_info; let body = &body.body; - if services.users.is_suspended(sender_user).await? { + if services.users.is_suspended(sender_user).await? 
{ return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); } diff --git a/src/api/client/report.rs b/src/api/client/report.rs index 052329d1..60a16e1a 100644 --- a/src/api/client/report.rs +++ b/src/api/client/report.rs @@ -8,7 +8,6 @@ use rand::Rng; use ruma::{ EventId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId, api::client::{ - error::ErrorKind, report_user, room::{report_content, report_room}, }, diff --git a/src/service/rooms/timeline/append.rs b/src/service/rooms/timeline/append.rs index a7b558c2..1d404e8a 100644 --- a/src/service/rooms/timeline/append.rs +++ b/src/service/rooms/timeline/append.rs @@ -348,9 +348,11 @@ where self.services.search.index_pdu(shortroomid, &pdu_id, &body); if self.services.admin.is_admin_command(pdu, &body).await { - self.services - .admin - .command_with_sender(body, Some((pdu.event_id()).into()), pdu.sender.clone().into())?; + self.services.admin.command_with_sender( + body, + Some((pdu.event_id()).into()), + pdu.sender.clone().into(), + )?; } } }, diff --git a/src/service/rooms/timeline/create.rs b/src/service/rooms/timeline/create.rs index d890e88e..20ccaf56 100644 --- a/src/service/rooms/timeline/create.rs +++ b/src/service/rooms/timeline/create.rs @@ -110,7 +110,7 @@ pub async fn create_hash_and_sign_event( // so any other events with that same depth are illegal. warn!( "Had unsafe depth {depth} when creating non-state event in {room_id}. 
Cowardly \ - aborting" + aborting" ); return Err!(Request(Unknown("Unsafe depth for non-state event."))); } From 7e406445d415f87ee3ce03fe82193fbf4e128e21 Mon Sep 17 00:00:00 2001 From: Tom Foster Date: Thu, 3 Jul 2025 22:26:02 +0100 Subject: [PATCH 087/270] Element Web build fixes --- .forgejo/workflows/element.yml | 35 ++++++++++++++++------------------ 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/.forgejo/workflows/element.yml b/.forgejo/workflows/element.yml index db771197..0199e8b6 100644 --- a/.forgejo/workflows/element.yml +++ b/.forgejo/workflows/element.yml @@ -11,16 +11,16 @@ concurrency: jobs: build-and-deploy: - name: Build and Deploy Element Web + name: 🏗️ Build and Deploy runs-on: ubuntu-latest steps: - - name: Setup Node.js - uses: https://code.forgejo.org/actions/setup-node@v4 + - name: 📦 Setup Node.js + uses: https://github.com/actions/setup-node@v4 with: - node-version: "20" + node-version: "22" - - name: Clone, setup, and build Element Web + - name: 🔨 Clone, setup, and build Element Web run: | echo "Cloning Element Web..." git clone https://github.com/maunium/element-web @@ -64,7 +64,7 @@ jobs: echo "Checking for build output..." 
ls -la webapp/ - - name: Create config.json + - name: ⚙️ Create config.json run: | cat < ./element-web/webapp/config.json { @@ -100,28 +100,25 @@ jobs: echo "Created ./element-web/webapp/config.json" cat ./element-web/webapp/config.json - - name: Upload Artifact + - name: 📤 Upload Artifact uses: https://code.forgejo.org/actions/upload-artifact@v3 with: name: element-web path: ./element-web/webapp/ retention-days: 14 - - name: Install Wrangler + - name: 🛠️ Install Wrangler run: npm install --save-dev wrangler@latest - - name: Deploy to Cloudflare Pages (Production) - if: github.ref == 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' + - name: 🚀 Deploy to Cloudflare Pages + if: vars.CLOUDFLARE_PROJECT_NAME != '' + id: deploy uses: https://github.com/cloudflare/wrangler-action@v3 with: accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./element-web/webapp --branch="main" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" - - - name: Deploy to Cloudflare Pages (Preview) - if: github.ref != 'refs/heads/main' && vars.CLOUDFLARE_PROJECT_NAME != '' - uses: https://github.com/cloudflare/wrangler-action@v3 - with: - accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} - apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - command: pages deploy ./element-web/webapp --branch="${{ github.head_ref || github.ref_name }}" --commit-dirty=true --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" + command: >- + pages deploy ./element-web/webapp + --branch="${{ github.ref == 'refs/heads/main' && 'main' || github.head_ref || github.ref_name }}" + --commit-dirty=true + --project-name="${{ vars.CLOUDFLARE_PROJECT_NAME }}-element" From 52954c5b75f2ac4af62a30a9f44d527bd0511801 Mon Sep 17 00:00:00 2001 From: Nyx Date: Sun, 6 Jul 2025 14:00:42 -0500 Subject: [PATCH 088/270] Even more renaming --- src/admin/debug/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index bceee9ba..fb8a3002 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -32,13 +32,13 @@ pub(super) enum DebugCommand { /// the command. ParsePdu, - /// - Retrieve and print a PDU by EventID from the conduwuit database + /// - Retrieve and print a PDU by EventID from the Continuwuity database GetPdu { /// An event ID (a $ followed by the base64 reference hash) event_id: OwnedEventId, }, - /// - Retrieve and print a PDU by PduId from the conduwuit database + /// - Retrieve and print a PDU by PduId from the Continuwuity database GetShortPdu { /// Shortroomid integer shortroomid: ShortRoomId, @@ -182,7 +182,7 @@ pub(super) enum DebugCommand { event_id: Option, }, - /// - Runs a server name through conduwuit's true destination resolution + /// - Runs a server name through Continuwuity's true destination resolution /// process /// /// Useful for debugging well-known issues From af8783ee51e828b9af8cbe31ddeaef3cd4e9d137 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 6 Jul 2025 21:15:49 +0100 Subject: [PATCH 089/270] ci: Mirror registry images --- .forgejo/regsync/regsync.yml | 55 ++++++++++++++++++++++++++++ .forgejo/workflows/mirror-images.yml | 47 ++++++++++++++++++++++++ 2 files changed, 102 insertions(+) create mode 100644 .forgejo/regsync/regsync.yml create mode 100644 .forgejo/workflows/mirror-images.yml diff --git a/.forgejo/regsync/regsync.yml b/.forgejo/regsync/regsync.yml new file mode 100644 index 00000000..0a14db45 --- /dev/null +++ b/.forgejo/regsync/regsync.yml @@ -0,0 +1,55 @@ +version: 1 + +x-source: &source forgejo.ellis.link/continuwuation/continuwuity + +x-tags: + releases: &tags-releases + tags: + allow: + - "latest" + - "v[0-9]+\\.[0-9]+\\.[0-9]+" + - "v[0-9]+\\.[0-9]+" + - "v[0-9]+" + main: &tags-main + tags: + allow: + - "latest" + - "v[0-9]+\\.[0-9]+\\.[0-9]+" + - "v[0-9]+\\.[0-9]+" + - "v[0-9]+" + - "main" + commits: &tags-commits + tags: + allow: + - "latest" + - 
"v[0-9]+\\.[0-9]+\\.[0-9]+" + - "v[0-9]+\\.[0-9]+" + - "v[0-9]+" + - "main" + - "sha-[a-f0-9]+" + all: &tags-all + tags: + allow: + - ".*" + +# Registry credentials +creds: + - registry: forgejo.ellis.link + user: "{{env \"BUILTIN_REGISTRY_USER\"}}" + pass: "{{env \"BUILTIN_REGISTRY_PASSWORD\"}}" + - registry: registry.gitlab.com + user: "{{env \"GITLAB_USERNAME\"}}" + pass: "{{env \"GITLAB_TOKEN\"}}" + +# Global defaults +defaults: + parallel: 3 + interval: 2h + digestTags: true + +# Sync configuration - each registry gets different image sets +sync: + - source: *source + target: registry.gitlab.com/continuwuity/continuwuity + type: repository + <<: *tags-main diff --git a/.forgejo/workflows/mirror-images.yml b/.forgejo/workflows/mirror-images.yml new file mode 100644 index 00000000..51f60e75 --- /dev/null +++ b/.forgejo/workflows/mirror-images.yml @@ -0,0 +1,47 @@ +name: Mirror Container Images + +on: + schedule: + # Run every 2 hours + - cron: "0 */2 * * *" + workflow_dispatch: + inputs: + dry_run: + description: 'Dry run (check only, no actual mirroring)' + required: false + default: false + type: boolean + +concurrency: + group: "mirror-images" + cancel-in-progress: true + +jobs: + mirror-images: + runs-on: ubuntu-latest + env: + BUILTIN_REGISTRY_USER: ${{ secrets.BUILTIN_REGISTRY_USER }} + BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }} + GITLAB_USERNAME: ${{ secrets.GITLAB_USERNAME }} + GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} + steps: + - name: Checkout repository + uses: actions/checkout@v4 + with: + persist-credentials: false + + - name: Install regctl + uses: https://forgejo.ellis.link/continuwuation/regclient-actions/regctl-installer@main + with: + binary: regsync + + - name: Check what images need mirroring + run: | + echo "Checking images that need mirroring..." + regsync check -c .forgejo/regsync/regsync.yml -v info + + - name: Mirror images + if: ${{ !inputs.dry_run }} + run: | + echo "Starting image mirroring..." 
+ regsync once -c .forgejo/regsync/regsync.yml -v info From 928b7c5e4a8d30c53941703fabf4467d17df412c Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 6 Jul 2025 22:57:33 +0100 Subject: [PATCH 090/270] fix: Correct vars --- .forgejo/workflows/mirror-images.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.forgejo/workflows/mirror-images.yml b/.forgejo/workflows/mirror-images.yml index 51f60e75..198832db 100644 --- a/.forgejo/workflows/mirror-images.yml +++ b/.forgejo/workflows/mirror-images.yml @@ -20,9 +20,9 @@ jobs: mirror-images: runs-on: ubuntu-latest env: - BUILTIN_REGISTRY_USER: ${{ secrets.BUILTIN_REGISTRY_USER }} + BUILTIN_REGISTRY_USER: ${{ vars.BUILTIN_REGISTRY_USER }} BUILTIN_REGISTRY_PASSWORD: ${{ secrets.BUILTIN_REGISTRY_PASSWORD }} - GITLAB_USERNAME: ${{ secrets.GITLAB_USERNAME }} + GITLAB_USERNAME: ${{ vars.GITLAB_USERNAME }} GITLAB_TOKEN: ${{ secrets.GITLAB_TOKEN }} steps: - name: Checkout repository From 18d12a7756620b14be48f9b37fc6e2ef6fa892ff Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Thu, 22 May 2025 13:19:30 +0100 Subject: [PATCH 091/270] feat: Support logging to journald with tracing-journald This stubs out on non-unix platforms. 
--- Cargo.lock | 12 ++++++++++++ Cargo.toml | 2 ++ arch/conduwuit.service | 4 ++++ conduwuit-example.toml | 9 +++++++++ debian/conduwuit.service | 3 +++ src/core/config/mod.rs | 18 ++++++++++++++++++ src/main/Cargo.toml | 7 +++++++ src/main/logging.rs | 37 ++++++++++++++++++++++++++++++++++++- 8 files changed, 91 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 82e7a20d..5a65a729 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -797,6 +797,7 @@ dependencies = [ "tokio-metrics", "tracing", "tracing-flame", + "tracing-journald", "tracing-opentelemetry", "tracing-subscriber", ] @@ -5178,6 +5179,17 @@ dependencies = [ "tracing-subscriber", ] +[[package]] +name = "tracing-journald" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc0b4143302cf1022dac868d521e36e8b27691f72c84b3311750d5188ebba657" +dependencies = [ + "libc", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "tracing-log" version = "0.2.0" diff --git a/Cargo.toml b/Cargo.toml index b815e2b8..75e15233 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -213,6 +213,8 @@ default-features = false version = "0.3.19" default-features = false features = ["env-filter", "std", "tracing", "tracing-log", "ansi", "fmt"] +[workspace.dependencies.tracing-journald] +version = "0.3.1" [workspace.dependencies.tracing-core] version = "0.1.33" default-features = false diff --git a/arch/conduwuit.service b/arch/conduwuit.service index d5a65e4d..b66bc1da 100644 --- a/arch/conduwuit.service +++ b/arch/conduwuit.service @@ -17,6 +17,10 @@ DeviceAllow=char-tty StandardInput=tty-force StandardOutput=tty StandardError=journal+console + +Environment="CONTINUWUITY_LOG_TO_JOURNALD=1" +Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N" + TTYReset=yes # uncomment to allow buffer to be cleared every restart TTYVTDisallocate=no diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 794ab870..1e403bba 100644 --- a/conduwuit-example.toml +++ 
b/conduwuit-example.toml @@ -671,6 +671,15 @@ # #log_thread_ids = false +# Enable journald logging on Unix platforms +# +#log_to_journald = false + +# The syslog identifier to use with journald logging +# Only used when journald logging is enabled +# +#journald_identifier = "conduwuit" + # OpenID token expiration/TTL in seconds. # # These are the OpenID tokens that are primarily used for Matrix account diff --git a/debian/conduwuit.service b/debian/conduwuit.service index be2f3dae..b95804d3 100644 --- a/debian/conduwuit.service +++ b/debian/conduwuit.service @@ -14,6 +14,9 @@ Type=notify Environment="CONTINUWUITY_CONFIG=/etc/conduwuit/conduwuit.toml" +Environment="CONTINUWUITY_LOG_TO_JOURNALD=1" +Environment="CONTINUWUITY_JOURNALD_IDENTIFIER=%N" + ExecStart=/usr/sbin/conduwuit ReadWritePaths=/var/lib/conduwuit /etc/conduwuit diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index e3db4900..6b054bd6 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -811,6 +811,24 @@ pub struct Config { #[serde(default)] pub log_thread_ids: bool, + /// Enable journald logging on Unix platforms + /// + /// When enabled, log output will be sent to the systemd journal + /// This is only supported on Unix platforms + /// + /// default: false + #[cfg(target_family = "unix")] + #[serde(default)] + pub log_to_journald: bool, + + /// The syslog identifier to use with journald logging + /// + /// Only used when journald logging is enabled + /// + /// Defaults to the binary name + #[cfg(target_family = "unix")] + pub journald_identifier: Option, + /// OpenID token expiration/TTL in seconds. 
/// /// These are the OpenID tokens that are primarily used for Matrix account diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 0c5e2b6f..2d8d26b5 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -43,6 +43,7 @@ default = [ "io_uring", "jemalloc", "jemalloc_conf", + "journald", "media_thumbnail", "release_max_log_level", "systemd", @@ -130,6 +131,11 @@ sentry_telemetry = [ systemd = [ "conduwuit-router/systemd", ] +journald = [ # This is a stub on non-unix platforms + "dep:tracing-journald", +] + + # enable the tokio_console server ncompatible with release_max_log_level tokio_console = [ "dep:console-subscriber", @@ -183,6 +189,7 @@ tracing-opentelemetry.optional = true tracing-opentelemetry.workspace = true tracing-subscriber.workspace = true tracing.workspace = true +tracing-journald = { workspace = true, optional = true } [target.'cfg(all(not(target_env = "msvc"), target_os = "linux"))'.dependencies] hardened_malloc-rs.workspace = true diff --git a/src/main/logging.rs b/src/main/logging.rs index 36a8896c..b7beb103 100644 --- a/src/main/logging.rs +++ b/src/main/logging.rs @@ -46,6 +46,16 @@ pub(crate) fn init( .with(console_layer.with_filter(console_reload_filter)) .with(cap_layer); + // If journald logging is enabled on Unix platforms, create a separate + // subscriber for it + #[cfg(all(target_family = "unix", feature = "journald"))] + if config.log_to_journald { + println!("Initialising journald logging"); + if let Err(e) = init_journald_logging(config) { + eprintln!("Failed to initialize journald logging: {e}"); + } + } + #[cfg(feature = "sentry_telemetry")] let subscriber = { let sentry_filter = EnvFilter::try_new(&config.sentry_filter) @@ -135,6 +145,28 @@ pub(crate) fn init( Ok(ret) } +#[cfg(all(target_family = "unix", feature = "journald"))] +fn init_journald_logging(config: &Config) -> Result<()> { + use tracing_journald::Layer as JournaldLayer; + + let journald_filter = + EnvFilter::try_new(&config.log).map_err(|e| 
err!(Config("log", "{e}.")))?; + + let mut journald_layer = JournaldLayer::new() + .map_err(|e| err!(Config("journald", "Failed to initialize journald layer: {e}.")))?; + + if let Some(ref identifier) = config.journald_identifier { + journald_layer = journald_layer.with_syslog_identifier(identifier.to_owned()); + } + + let journald_subscriber = + Registry::default().with(journald_layer.with_filter(journald_filter)); + + let _guard = tracing::subscriber::set_default(journald_subscriber); + + Ok(()) +} + fn tokio_console_enabled(config: &Config) -> (bool, &'static str) { if !cfg!(all(feature = "tokio_console", tokio_unstable)) { return (false, ""); @@ -154,7 +186,10 @@ fn tokio_console_enabled(config: &Config) -> (bool, &'static str) { (true, "") } -fn set_global_default(subscriber: S) { +fn set_global_default(subscriber: S) +where + S: tracing::Subscriber + Send + Sync + 'static, +{ tracing::subscriber::set_global_default(subscriber) .expect("the global default tracing subscriber failed to be initialized"); } From d98ce2c7b9ff248989bea363907ba4709743febd Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 24 May 2025 00:28:09 +0100 Subject: [PATCH 092/270] feat: Generate admin command documentation The first part of getting admin command docs on the website. There's also the beginnings of manpage generation here, although it's kinda sus and I'm not sure how it's supposed to work. I'll leave that to anyone who wants to package it. We introduce the beginings of the xtask pattern here - we do a lot of file generation, I thought it would be best to avoid doing that on every compilation. It also helps avoid lots of runtime deps. We'll need to document generating this stuff & probably add pre-commit hooks for it, though. 
--- .cargo/config.toml | 2 + Cargo.lock | 121 + Cargo.toml | 10 +- conduwuit-example.toml | 13 +- docs/SUMMARY.md | 1 + docs/admin_reference.md | 2658 ++++++++++++++++++++++ src/admin/admin.rs | 2 +- src/admin/appservice/mod.rs | 2 +- src/admin/check/mod.rs | 2 +- src/admin/debug/mod.rs | 2 +- src/admin/debug/tester.rs | 2 +- src/admin/federation/mod.rs | 2 +- src/admin/media/mod.rs | 6 +- src/admin/mod.rs | 2 + src/admin/query/account_data.rs | 2 +- src/admin/query/appservice.rs | 2 +- src/admin/query/globals.rs | 2 +- src/admin/query/mod.rs | 2 +- src/admin/query/presence.rs | 2 +- src/admin/query/pusher.rs | 2 +- src/admin/query/raw.rs | 2 +- src/admin/query/resolver.rs | 2 +- src/admin/query/room_alias.rs | 2 +- src/admin/query/room_state_cache.rs | 2 +- src/admin/query/room_timeline.rs | 2 +- src/admin/query/sending.rs | 2 +- src/admin/query/short.rs | 2 +- src/admin/query/users.rs | 2 +- src/admin/room/alias.rs | 2 +- src/admin/room/directory.rs | 2 +- src/admin/room/info.rs | 2 +- src/admin/room/mod.rs | 2 +- src/admin/room/moderation.rs | 2 +- src/admin/server/mod.rs | 2 +- src/admin/user/mod.rs | 2 +- src/main/Cargo.toml | 7 + src/main/mod.rs | 14 + xtask/generate-admin-command/Cargo.toml | 26 + xtask/generate-admin-command/src/main.rs | 63 + xtask/main/Cargo.toml | 22 + xtask/main/src/main.rs | 11 + 41 files changed, 2977 insertions(+), 33 deletions(-) create mode 100644 .cargo/config.toml create mode 100644 docs/admin_reference.md create mode 100644 src/main/mod.rs create mode 100644 xtask/generate-admin-command/Cargo.toml create mode 100644 xtask/generate-admin-command/src/main.rs create mode 100644 xtask/main/Cargo.toml create mode 100644 xtask/main/src/main.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 00000000..35049cbc --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,2 @@ +[alias] +xtask = "run --package xtask --" diff --git a/Cargo.lock b/Cargo.lock index 5a65a729..fe8cb16d 100644 --- a/Cargo.lock +++ 
b/Cargo.lock @@ -50,12 +50,56 @@ dependencies = [ "alloc-no-stdlib", ] +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", +] + [[package]] name = "anstyle" version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6680de5231bd6ee4c6191b8a1325daa282b415391ec9d3a37bd34f2060dc73fa" +dependencies = [ + "anstyle", + "once_cell_polyfill", + "windows-sys 0.59.0", +] + [[package]] name = "anyhow" version = "1.0.98" @@ -720,14 +764,25 @@ dependencies = [ "clap_derive", ] +[[package]] +name = "clap-markdown" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2a2617956a06d4885b490697b5307ebb09fec10b088afc18c81762d848c2339" +dependencies = [ + "clap", +] + [[package]] name = "clap_builder" version = "4.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0c66c08ce9f0c698cbce5c0279d0bb6ac936d8674174fe48f736533b964f59e" dependencies = [ + "anstream", "anstyle", "clap_lex", + "strsim", ] [[package]] @@ 
-748,6 +803,16 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +[[package]] +name = "clap_mangen" +version = "0.2.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "724842fa9b144f9b89b3f3d371a89f3455eea660361d13a554f68f8ae5d6c13a" +dependencies = [ + "clap", + "roff", +] + [[package]] name = "cmake" version = "0.1.54" @@ -763,6 +828,12 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + [[package]] name = "concurrent-queue" version = "2.5.0" @@ -2399,6 +2470,12 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.12.1" @@ -3007,6 +3084,12 @@ dependencies = [ "portable-atomic", ] +[[package]] +name = "once_cell_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" + [[package]] name = "openssl-probe" version = "0.1.6" @@ -3796,6 +3879,12 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "roff" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88f8660c1ff60292143c98d08fc6e2f654d722db50410e3f3797d40baaf9d8f3" + [[package]] name = "ruma" version = "0.10.1" @@ -4637,6 +4726,12 
@@ dependencies = [ "quote", ] +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "subslice" version = "0.2.3" @@ -5367,6 +5462,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + [[package]] name = "uuid" version = "1.17.0" @@ -6018,6 +6119,26 @@ dependencies = [ "markup5ever", ] +[[package]] +name = "xtask" +version = "0.5.0-rc.6" +dependencies = [ + "clap", + "serde", + "serde_json", +] + +[[package]] +name = "xtask-admin-command" +version = "0.5.0-rc.6" +dependencies = [ + "clap-markdown", + "clap_builder", + "clap_mangen", + "conduwuit", + "conduwuit_admin", +] + [[package]] name = "yansi" version = "1.0.1" diff --git a/Cargo.toml b/Cargo.toml index 75e15233..03c5b489 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ [workspace] resolver = "2" -members = ["src/*"] +members = ["src/*", "xtask/*"] default-members = ["src/*"] [workspace.package] @@ -638,6 +638,11 @@ package = "conduwuit_build_metadata" path = "src/build_metadata" default-features = false + +[workspace.dependencies.conduwuit] +package = "conduwuit" +path = "src/main" + ############################################################################### # # Release profiles @@ -763,7 +768,8 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,nodlopen', # '-Clink-arg=-Wl,-z,nodelete', #] - +[profile.dev.package.xtask-admin-command] +inherits = "dev" [profile.dev.package.conduwuit] inherits = "dev" #rustflags = [ diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 1e403bba..22ad669b 100644 --- a/conduwuit-example.toml +++ 
b/conduwuit-example.toml @@ -407,6 +407,11 @@ # invites, or create/join or otherwise modify rooms. # They are effectively read-only. # +# If you want to use this to screen people who register on your server, +# you should add a room to `auto_join_rooms` that is public, and contains +# information that new users can read (since they won't be able to DM +# anyone, or send a message, and may be confused). +# #suspend_on_register = false # Enabling this setting opens registration to anyone without restrictions. @@ -673,12 +678,18 @@ # Enable journald logging on Unix platforms # +# When enabled, log output will be sent to the systemd journal +# This is only supported on Unix platforms +# #log_to_journald = false # The syslog identifier to use with journald logging +# # Only used when journald logging is enabled # -#journald_identifier = "conduwuit" +# Defaults to the binary name +# +#journald_identifier = # OpenID token expiration/TTL in seconds. # diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index af729003..b38009a1 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -15,6 +15,7 @@ - [Appservices](appservices.md) - [Maintenance](maintenance.md) - [Troubleshooting](troubleshooting.md) +- [Admin Command Reference](admin_reference.md) - [Development](development.md) - [Contributing](contributing.md) - [Testing](development/testing.md) diff --git a/docs/admin_reference.md b/docs/admin_reference.md new file mode 100644 index 00000000..18e039e4 --- /dev/null +++ b/docs/admin_reference.md @@ -0,0 +1,2658 @@ +# Command-Line Help for `admin` + +This document contains the help content for the `admin` command-line program. 
+ +**Command Overview:** + +* [`admin`↴](#admin) +* [`admin appservices`↴](#admin-appservices) +* [`admin appservices register`↴](#admin-appservices-register) +* [`admin appservices unregister`↴](#admin-appservices-unregister) +* [`admin appservices show-appservice-config`↴](#admin-appservices-show-appservice-config) +* [`admin appservices list-registered`↴](#admin-appservices-list-registered) +* [`admin users`↴](#admin-users) +* [`admin users create-user`↴](#admin-users-create-user) +* [`admin users reset-password`↴](#admin-users-reset-password) +* [`admin users deactivate`↴](#admin-users-deactivate) +* [`admin users deactivate-all`↴](#admin-users-deactivate-all) +* [`admin users suspend`↴](#admin-users-suspend) +* [`admin users unsuspend`↴](#admin-users-unsuspend) +* [`admin users list-users`↴](#admin-users-list-users) +* [`admin users list-joined-rooms`↴](#admin-users-list-joined-rooms) +* [`admin users force-join-room`↴](#admin-users-force-join-room) +* [`admin users force-leave-room`↴](#admin-users-force-leave-room) +* [`admin users force-demote`↴](#admin-users-force-demote) +* [`admin users make-user-admin`↴](#admin-users-make-user-admin) +* [`admin users put-room-tag`↴](#admin-users-put-room-tag) +* [`admin users delete-room-tag`↴](#admin-users-delete-room-tag) +* [`admin users get-room-tags`↴](#admin-users-get-room-tags) +* [`admin users redact-event`↴](#admin-users-redact-event) +* [`admin users force-join-list-of-local-users`↴](#admin-users-force-join-list-of-local-users) +* [`admin users force-join-all-local-users`↴](#admin-users-force-join-all-local-users) +* [`admin rooms`↴](#admin-rooms) +* [`admin rooms list-rooms`↴](#admin-rooms-list-rooms) +* [`admin rooms info`↴](#admin-rooms-info) +* [`admin rooms info list-joined-members`↴](#admin-rooms-info-list-joined-members) +* [`admin rooms info view-room-topic`↴](#admin-rooms-info-view-room-topic) +* [`admin rooms moderation`↴](#admin-rooms-moderation) +* [`admin rooms moderation 
ban-room`↴](#admin-rooms-moderation-ban-room) +* [`admin rooms moderation ban-list-of-rooms`↴](#admin-rooms-moderation-ban-list-of-rooms) +* [`admin rooms moderation unban-room`↴](#admin-rooms-moderation-unban-room) +* [`admin rooms moderation list-banned-rooms`↴](#admin-rooms-moderation-list-banned-rooms) +* [`admin rooms alias`↴](#admin-rooms-alias) +* [`admin rooms alias set`↴](#admin-rooms-alias-set) +* [`admin rooms alias remove`↴](#admin-rooms-alias-remove) +* [`admin rooms alias which`↴](#admin-rooms-alias-which) +* [`admin rooms alias list`↴](#admin-rooms-alias-list) +* [`admin rooms directory`↴](#admin-rooms-directory) +* [`admin rooms directory publish`↴](#admin-rooms-directory-publish) +* [`admin rooms directory unpublish`↴](#admin-rooms-directory-unpublish) +* [`admin rooms directory list`↴](#admin-rooms-directory-list) +* [`admin rooms exists`↴](#admin-rooms-exists) +* [`admin federation`↴](#admin-federation) +* [`admin federation incoming-federation`↴](#admin-federation-incoming-federation) +* [`admin federation disable-room`↴](#admin-federation-disable-room) +* [`admin federation enable-room`↴](#admin-federation-enable-room) +* [`admin federation fetch-support-well-known`↴](#admin-federation-fetch-support-well-known) +* [`admin federation remote-user-in-rooms`↴](#admin-federation-remote-user-in-rooms) +* [`admin server`↴](#admin-server) +* [`admin server uptime`↴](#admin-server-uptime) +* [`admin server show-config`↴](#admin-server-show-config) +* [`admin server reload-config`↴](#admin-server-reload-config) +* [`admin server list-features`↴](#admin-server-list-features) +* [`admin server memory-usage`↴](#admin-server-memory-usage) +* [`admin server clear-caches`↴](#admin-server-clear-caches) +* [`admin server backup-database`↴](#admin-server-backup-database) +* [`admin server list-backups`↴](#admin-server-list-backups) +* [`admin server admin-notice`↴](#admin-server-admin-notice) +* [`admin server reload-mods`↴](#admin-server-reload-mods) +* [`admin 
server restart`↴](#admin-server-restart) +* [`admin server shutdown`↴](#admin-server-shutdown) +* [`admin media`↴](#admin-media) +* [`admin media delete`↴](#admin-media-delete) +* [`admin media delete-list`↴](#admin-media-delete-list) +* [`admin media delete-past-remote-media`↴](#admin-media-delete-past-remote-media) +* [`admin media delete-all-from-user`↴](#admin-media-delete-all-from-user) +* [`admin media delete-all-from-server`↴](#admin-media-delete-all-from-server) +* [`admin media get-file-info`↴](#admin-media-get-file-info) +* [`admin media get-remote-file`↴](#admin-media-get-remote-file) +* [`admin media get-remote-thumbnail`↴](#admin-media-get-remote-thumbnail) +* [`admin check`↴](#admin-check) +* [`admin check check-all-users`↴](#admin-check-check-all-users) +* [`admin debug`↴](#admin-debug) +* [`admin debug echo`↴](#admin-debug-echo) +* [`admin debug get-auth-chain`↴](#admin-debug-get-auth-chain) +* [`admin debug parse-pdu`↴](#admin-debug-parse-pdu) +* [`admin debug get-pdu`↴](#admin-debug-get-pdu) +* [`admin debug get-short-pdu`↴](#admin-debug-get-short-pdu) +* [`admin debug get-remote-pdu`↴](#admin-debug-get-remote-pdu) +* [`admin debug get-remote-pdu-list`↴](#admin-debug-get-remote-pdu-list) +* [`admin debug get-room-state`↴](#admin-debug-get-room-state) +* [`admin debug get-signing-keys`↴](#admin-debug-get-signing-keys) +* [`admin debug get-verify-keys`↴](#admin-debug-get-verify-keys) +* [`admin debug ping`↴](#admin-debug-ping) +* [`admin debug force-device-list-updates`↴](#admin-debug-force-device-list-updates) +* [`admin debug change-log-level`↴](#admin-debug-change-log-level) +* [`admin debug sign-json`↴](#admin-debug-sign-json) +* [`admin debug verify-json`↴](#admin-debug-verify-json) +* [`admin debug verify-pdu`↴](#admin-debug-verify-pdu) +* [`admin debug first-pdu-in-room`↴](#admin-debug-first-pdu-in-room) +* [`admin debug latest-pdu-in-room`↴](#admin-debug-latest-pdu-in-room) +* [`admin debug 
force-set-room-state-from-server`↴](#admin-debug-force-set-room-state-from-server) +* [`admin debug resolve-true-destination`↴](#admin-debug-resolve-true-destination) +* [`admin debug memory-stats`↴](#admin-debug-memory-stats) +* [`admin debug runtime-metrics`↴](#admin-debug-runtime-metrics) +* [`admin debug runtime-interval`↴](#admin-debug-runtime-interval) +* [`admin debug time`↴](#admin-debug-time) +* [`admin debug list-dependencies`↴](#admin-debug-list-dependencies) +* [`admin debug database-stats`↴](#admin-debug-database-stats) +* [`admin debug trim-memory`↴](#admin-debug-trim-memory) +* [`admin debug database-files`↴](#admin-debug-database-files) +* [`admin query`↴](#admin-query) +* [`admin query account-data`↴](#admin-query-account-data) +* [`admin query account-data changes-since`↴](#admin-query-account-data-changes-since) +* [`admin query account-data account-data-get`↴](#admin-query-account-data-account-data-get) +* [`admin query appservice`↴](#admin-query-appservice) +* [`admin query appservice get-registration`↴](#admin-query-appservice-get-registration) +* [`admin query appservice all`↴](#admin-query-appservice-all) +* [`admin query presence`↴](#admin-query-presence) +* [`admin query presence get-presence`↴](#admin-query-presence-get-presence) +* [`admin query presence presence-since`↴](#admin-query-presence-presence-since) +* [`admin query room-alias`↴](#admin-query-room-alias) +* [`admin query room-alias resolve-local-alias`↴](#admin-query-room-alias-resolve-local-alias) +* [`admin query room-alias local-aliases-for-room`↴](#admin-query-room-alias-local-aliases-for-room) +* [`admin query room-alias all-local-aliases`↴](#admin-query-room-alias-all-local-aliases) +* [`admin query room-state-cache`↴](#admin-query-room-state-cache) +* [`admin query room-state-cache server-in-room`↴](#admin-query-room-state-cache-server-in-room) +* [`admin query room-state-cache room-servers`↴](#admin-query-room-state-cache-room-servers) +* [`admin query room-state-cache 
server-rooms`↴](#admin-query-room-state-cache-server-rooms) +* [`admin query room-state-cache room-members`↴](#admin-query-room-state-cache-room-members) +* [`admin query room-state-cache local-users-in-room`↴](#admin-query-room-state-cache-local-users-in-room) +* [`admin query room-state-cache active-local-users-in-room`↴](#admin-query-room-state-cache-active-local-users-in-room) +* [`admin query room-state-cache room-joined-count`↴](#admin-query-room-state-cache-room-joined-count) +* [`admin query room-state-cache room-invited-count`↴](#admin-query-room-state-cache-room-invited-count) +* [`admin query room-state-cache room-user-once-joined`↴](#admin-query-room-state-cache-room-user-once-joined) +* [`admin query room-state-cache room-members-invited`↴](#admin-query-room-state-cache-room-members-invited) +* [`admin query room-state-cache get-invite-count`↴](#admin-query-room-state-cache-get-invite-count) +* [`admin query room-state-cache get-left-count`↴](#admin-query-room-state-cache-get-left-count) +* [`admin query room-state-cache rooms-joined`↴](#admin-query-room-state-cache-rooms-joined) +* [`admin query room-state-cache rooms-left`↴](#admin-query-room-state-cache-rooms-left) +* [`admin query room-state-cache rooms-invited`↴](#admin-query-room-state-cache-rooms-invited) +* [`admin query room-state-cache invite-state`↴](#admin-query-room-state-cache-invite-state) +* [`admin query room-timeline`↴](#admin-query-room-timeline) +* [`admin query room-timeline pdus`↴](#admin-query-room-timeline-pdus) +* [`admin query room-timeline last`↴](#admin-query-room-timeline-last) +* [`admin query globals`↴](#admin-query-globals) +* [`admin query globals database-version`↴](#admin-query-globals-database-version) +* [`admin query globals current-count`↴](#admin-query-globals-current-count) +* [`admin query globals last-check-for-announcements-id`↴](#admin-query-globals-last-check-for-announcements-id) +* [`admin query globals 
signing-keys-for`↴](#admin-query-globals-signing-keys-for) +* [`admin query sending`↴](#admin-query-sending) +* [`admin query sending active-requests`↴](#admin-query-sending-active-requests) +* [`admin query sending active-requests-for`↴](#admin-query-sending-active-requests-for) +* [`admin query sending queued-requests`↴](#admin-query-sending-queued-requests) +* [`admin query sending get-latest-edu-count`↴](#admin-query-sending-get-latest-edu-count) +* [`admin query users`↴](#admin-query-users) +* [`admin query users count-users`↴](#admin-query-users-count-users) +* [`admin query users iter-users`↴](#admin-query-users-iter-users) +* [`admin query users iter-users2`↴](#admin-query-users-iter-users2) +* [`admin query users password-hash`↴](#admin-query-users-password-hash) +* [`admin query users list-devices`↴](#admin-query-users-list-devices) +* [`admin query users list-devices-metadata`↴](#admin-query-users-list-devices-metadata) +* [`admin query users get-device-metadata`↴](#admin-query-users-get-device-metadata) +* [`admin query users get-devices-version`↴](#admin-query-users-get-devices-version) +* [`admin query users count-one-time-keys`↴](#admin-query-users-count-one-time-keys) +* [`admin query users get-device-keys`↴](#admin-query-users-get-device-keys) +* [`admin query users get-user-signing-key`↴](#admin-query-users-get-user-signing-key) +* [`admin query users get-master-key`↴](#admin-query-users-get-master-key) +* [`admin query users get-to-device-events`↴](#admin-query-users-get-to-device-events) +* [`admin query users get-latest-backup`↴](#admin-query-users-get-latest-backup) +* [`admin query users get-latest-backup-version`↴](#admin-query-users-get-latest-backup-version) +* [`admin query users get-backup-algorithm`↴](#admin-query-users-get-backup-algorithm) +* [`admin query users get-all-backups`↴](#admin-query-users-get-all-backups) +* [`admin query users get-room-backups`↴](#admin-query-users-get-room-backups) +* [`admin query users 
get-backup-session`↴](#admin-query-users-get-backup-session) +* [`admin query users get-shared-rooms`↴](#admin-query-users-get-shared-rooms) +* [`admin query resolver`↴](#admin-query-resolver) +* [`admin query resolver destinations-cache`↴](#admin-query-resolver-destinations-cache) +* [`admin query resolver overrides-cache`↴](#admin-query-resolver-overrides-cache) +* [`admin query pusher`↴](#admin-query-pusher) +* [`admin query pusher get-pushers`↴](#admin-query-pusher-get-pushers) +* [`admin query short`↴](#admin-query-short) +* [`admin query short short-event-id`↴](#admin-query-short-short-event-id) +* [`admin query short short-room-id`↴](#admin-query-short-short-room-id) +* [`admin query raw`↴](#admin-query-raw) +* [`admin query raw raw-maps`↴](#admin-query-raw-raw-maps) +* [`admin query raw raw-get`↴](#admin-query-raw-raw-get) +* [`admin query raw raw-del`↴](#admin-query-raw-raw-del) +* [`admin query raw raw-keys`↴](#admin-query-raw-raw-keys) +* [`admin query raw raw-keys-sizes`↴](#admin-query-raw-raw-keys-sizes) +* [`admin query raw raw-keys-total`↴](#admin-query-raw-raw-keys-total) +* [`admin query raw raw-vals-sizes`↴](#admin-query-raw-raw-vals-sizes) +* [`admin query raw raw-vals-total`↴](#admin-query-raw-raw-vals-total) +* [`admin query raw raw-iter`↴](#admin-query-raw-raw-iter) +* [`admin query raw raw-keys-from`↴](#admin-query-raw-raw-keys-from) +* [`admin query raw raw-iter-from`↴](#admin-query-raw-raw-iter-from) +* [`admin query raw raw-count`↴](#admin-query-raw-raw-count) +* [`admin query raw compact`↴](#admin-query-raw-compact) + +## `admin` + +**Usage:** `admin ` + +###### **Subcommands:** + +* `appservices` — - Commands for managing appservices +* `users` — - Commands for managing local users +* `rooms` — - Commands for managing rooms +* `federation` — - Commands for managing federation +* `server` — - Commands for managing the server +* `media` — - Commands for managing media +* `check` — - Commands for checking integrity +* `debug` — - Commands 
for debugging things +* `query` — - Low-level queries for database getters and iterators + + + +## `admin appservices` + +- Commands for managing appservices + +**Usage:** `admin appservices ` + +###### **Subcommands:** + +* `register` — - Register an appservice using its registration YAML +* `unregister` — - Unregister an appservice using its ID +* `show-appservice-config` — - Show an appservice's config using its ID +* `list-registered` — - List all the currently registered appservices + + + +## `admin appservices register` + +- Register an appservice using its registration YAML + +This command needs a YAML generated by an appservice (such as a bridge), which must be provided in a Markdown code block below the command. + +Registering a new bridge using the ID of an existing bridge will replace the old one. + +**Usage:** `admin appservices register` + + + +## `admin appservices unregister` + +- Unregister an appservice using its ID + +You can find the ID using the `list-registered` command. + +**Usage:** `admin appservices unregister ` + +###### **Arguments:** + +* `` — The appservice to unregister + + + +## `admin appservices show-appservice-config` + +- Show an appservice's config using its ID + +You can find the ID using the `list-registered` command. 
+ +**Usage:** `admin appservices show-appservice-config ` + +###### **Arguments:** + +* `` — The appservice to show + + + +## `admin appservices list-registered` + +- List all the currently registered appservices + +**Usage:** `admin appservices list-registered` + + + +## `admin users` + +- Commands for managing local users + +**Usage:** `admin users ` + +###### **Subcommands:** + +* `create-user` — - Create a new user +* `reset-password` — - Reset user password +* `deactivate` — - Deactivate a user +* `deactivate-all` — - Deactivate a list of users +* `suspend` — - Suspend a user +* `unsuspend` — - Unsuspend a user +* `list-users` — - List local users in the database +* `list-joined-rooms` — - Lists all the rooms (local and remote) that the specified user is joined in +* `force-join-room` — - Manually join a local user to a room +* `force-leave-room` — - Manually leave a local user from a room +* `force-demote` — - Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits +* `make-user-admin` — - Grant server-admin privileges to a user +* `put-room-tag` — - Puts a room tag for the specified user and room ID +* `delete-room-tag` — - Deletes the room tag for the specified user and room ID +* `get-room-tags` — - Gets all the room tags for the specified user and room ID +* `redact-event` — - Attempts to forcefully redact the specified event ID from the sender user +* `force-join-list-of-local-users` — - Force joins a specified list of local users to join the specified room +* `force-join-all-local-users` — - Force joins all local users to the specified room + + + +## `admin users create-user` + +- Create a new user + +**Usage:** `admin users create-user [PASSWORD]` + +###### **Arguments:** + +* `` — Username of the new user +* `` — Password of the new user, if unspecified one is generated + + + +## `admin users reset-password` + +- Reset user password + +**Usage:** `admin users reset-password 
[PASSWORD]` + +###### **Arguments:** + +* `` — Username of the user for whom the password should be reset +* `` — New password for the user, if unspecified one is generated + + + +## `admin users deactivate` + +- Deactivate a user + +User will be removed from all rooms by default. Use --no-leave-rooms to not leave all rooms by default. + +**Usage:** `admin users deactivate [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `-n`, `--no-leave-rooms` + + + +## `admin users deactivate-all` + +- Deactivate a list of users + +Recommended to use in conjunction with list-local-users. + +Users will be removed from joined rooms by default. + +Can be overridden with --no-leave-rooms. + +Removing a mass amount of users from a room may cause a significant amount of leave events. The time to leave rooms may depend significantly on joined rooms and servers. + +This command needs a newline separated list of users provided in a Markdown code block below the command. + +**Usage:** `admin users deactivate-all [OPTIONS]` + +###### **Options:** + +* `-n`, `--no-leave-rooms` — Does not leave any rooms the user is in on deactivation +* `-f`, `--force` — Also deactivate admin accounts and will assume leave all rooms too + + + +## `admin users suspend` + +- Suspend a user + +Suspended users are able to log in, sync, and read messages, but are not able to send events nor redact them, cannot change their profile, and are unable to join, invite to, or knock on rooms. + +Suspended users can still leave rooms and deactivate their account. Suspending them effectively makes them read-only. + +**Usage:** `admin users suspend ` + +###### **Arguments:** + +* `` — Username of the user to suspend + + + +## `admin users unsuspend` + +- Unsuspend a user + +Reverses the effects of the `suspend` command, allowing the user to send messages, change their profile, create room invites, etc. 
+ +**Usage:** `admin users unsuspend ` + +###### **Arguments:** + +* `` — Username of the user to unsuspend + + + +## `admin users list-users` + +- List local users in the database + +**Usage:** `admin users list-users` + + + +## `admin users list-joined-rooms` + +- Lists all the rooms (local and remote) that the specified user is joined in + +**Usage:** `admin users list-joined-rooms ` + +###### **Arguments:** + +* `` + + + +## `admin users force-join-room` + +- Manually join a local user to a room + +**Usage:** `admin users force-join-room ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin users force-leave-room` + +- Manually leave a local user from a room + +**Usage:** `admin users force-leave-room ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin users force-demote` + +- Forces the specified user to drop their power levels to the room default, if their permissions allow and the auth check permits + +**Usage:** `admin users force-demote ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin users make-user-admin` + +- Grant server-admin privileges to a user + +**Usage:** `admin users make-user-admin ` + +###### **Arguments:** + +* `` + + + +## `admin users put-room-tag` + +- Puts a room tag for the specified user and room ID. + +This is primarily useful if you'd like to set your admin room to the special "System Alerts" section in Element as a way to permanently see your admin room without it being buried away in your favourites or rooms. To do this, you would pass your user, your admin room's internal ID, and the tag name `m.server_notice`. 
+ +**Usage:** `admin users put-room-tag ` + +###### **Arguments:** + +* `` +* `` +* `` + + + +## `admin users delete-room-tag` + +- Deletes the room tag for the specified user and room ID + +**Usage:** `admin users delete-room-tag ` + +###### **Arguments:** + +* `` +* `` +* `` + + + +## `admin users get-room-tags` + +- Gets all the room tags for the specified user and room ID + +**Usage:** `admin users get-room-tags ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin users redact-event` + +- Attempts to forcefully redact the specified event ID from the sender user + +This is only valid for local users + +**Usage:** `admin users redact-event ` + +###### **Arguments:** + +* `` + + + +## `admin users force-join-list-of-local-users` + +- Force joins a specified list of local users to join the specified room. + +Specify a codeblock of usernames. + +At least 1 server admin must be in the room to reduce abuse. + +Requires the `--yes-i-want-to-do-this` flag. + +**Usage:** `admin users force-join-list-of-local-users [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--yes-i-want-to-do-this` + + + +## `admin users force-join-all-local-users` + +- Force joins all local users to the specified room. + +At least 1 server admin must be in the room to reduce abuse. + +Requires the `--yes-i-want-to-do-this` flag. 
+ +**Usage:** `admin users force-join-all-local-users [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--yes-i-want-to-do-this` + + + +## `admin rooms` + +- Commands for managing rooms + +**Usage:** `admin rooms ` + +###### **Subcommands:** + +* `list-rooms` — - List all rooms the server knows about +* `info` — - View information about a room we know about +* `moderation` — - Manage moderation of remote or local rooms +* `alias` — - Manage rooms' aliases +* `directory` — - Manage the room directory +* `exists` — - Check if we know about a room + + + +## `admin rooms list-rooms` + +- List all rooms the server knows about + +**Usage:** `admin rooms list-rooms [OPTIONS] [PAGE]` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--exclude-disabled` — Excludes rooms that we have federation disabled with +* `--exclude-banned` — Excludes rooms that we have banned +* `--no-details` — Whether to only output room IDs without supplementary room information + + + +## `admin rooms info` + +- View information about a room we know about + +**Usage:** `admin rooms info ` + +###### **Subcommands:** + +* `list-joined-members` — - List joined members in a room +* `view-room-topic` — - Displays room topic + + + +## `admin rooms info list-joined-members` + +- List joined members in a room + +**Usage:** `admin rooms info list-joined-members [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--local-only` — Lists only our local users in the specified room + + + +## `admin rooms info view-room-topic` + +- Displays room topic + +Room topics can be huge, so this is in its own separate command + +**Usage:** `admin rooms info view-room-topic ` + +###### **Arguments:** + +* `` + + + +## `admin rooms moderation` + +- Manage moderation of remote or local rooms + +**Usage:** `admin rooms moderation ` + +###### **Subcommands:** + +* `ban-room` — - Bans a room from local users joining and evicts all our local users (including server admins) 
from the room. Also blocks any invites (local and remote) for the banned room, and disables federation entirely with it +* `ban-list-of-rooms` — - Bans a list of rooms (room IDs and room aliases) from a newline delimited codeblock similar to `user deactivate-all`. Applies the same steps as ban-room +* `unban-room` — - Unbans a room to allow local users to join again +* `list-banned-rooms` — - List of all rooms we have banned + + + +## `admin rooms moderation ban-room` + +- Bans a room from local users joining and evicts all our local users (including server admins) from the room. Also blocks any invites (local and remote) for the banned room, and disables federation entirely with it + +**Usage:** `admin rooms moderation ban-room ` + +###### **Arguments:** + +* `` — The room in the format of `!roomid:example.com` or a room alias in the format of `#roomalias:example.com` + + + +## `admin rooms moderation ban-list-of-rooms` + +- Bans a list of rooms (room IDs and room aliases) from a newline delimited codeblock similar to `user deactivate-all`. 
Applies the same steps as ban-room + +**Usage:** `admin rooms moderation ban-list-of-rooms` + + + +## `admin rooms moderation unban-room` + +- Unbans a room to allow local users to join again + +**Usage:** `admin rooms moderation unban-room ` + +###### **Arguments:** + +* `` — The room in the format of `!roomid:example.com` or a room alias in the format of `#roomalias:example.com` + + + +## `admin rooms moderation list-banned-rooms` + +- List of all rooms we have banned + +**Usage:** `admin rooms moderation list-banned-rooms [OPTIONS]` + +###### **Options:** + +* `--no-details` — Whether to only output room IDs without supplementary room information + + + +## `admin rooms alias` + +- Manage rooms' aliases + +**Usage:** `admin rooms alias ` + +###### **Subcommands:** + +* `set` — - Make an alias point to a room +* `remove` — - Remove a local alias +* `which` — - Show which room is using an alias +* `list` — - List aliases currently being used + + + +## `admin rooms alias set` + +- Make an alias point to a room + +**Usage:** `admin rooms alias set [OPTIONS] ` + +###### **Arguments:** + +* `` — The room id to set the alias on +* `` — The alias localpart to use (`alias`, not `#alias:servername.tld`) + +###### **Options:** + +* `-f`, `--force` — Set the alias even if a room is already using it + + + +## `admin rooms alias remove` + +- Remove a local alias + +**Usage:** `admin rooms alias remove ` + +###### **Arguments:** + +* `` — The alias localpart to remove (`alias`, not `#alias:servername.tld`) + + + +## `admin rooms alias which` + +- Show which room is using an alias + +**Usage:** `admin rooms alias which ` + +###### **Arguments:** + +* `` — The alias localpart to look up (`alias`, not `#alias:servername.tld`) + + + +## `admin rooms alias list` + +- List aliases currently being used + +**Usage:** `admin rooms alias list [ROOM_ID]` + +###### **Arguments:** + +* `` — If set, only list the aliases for this room + + + +## `admin rooms directory` + +- Manage the room 
directory + +**Usage:** `admin rooms directory ` + +###### **Subcommands:** + +* `publish` — - Publish a room to the room directory +* `unpublish` — - Unpublish a room from the room directory +* `list` — - List rooms that are published + + + +## `admin rooms directory publish` + +- Publish a room to the room directory + +**Usage:** `admin rooms directory publish ` + +###### **Arguments:** + +* `` — The room id of the room to publish + + + +## `admin rooms directory unpublish` + +- Unpublish a room from the room directory + +**Usage:** `admin rooms directory unpublish ` + +###### **Arguments:** + +* `` — The room id of the room to unpublish + + + +## `admin rooms directory list` + +- List rooms that are published + +**Usage:** `admin rooms directory list [PAGE]` + +###### **Arguments:** + +* `` + + + +## `admin rooms exists` + +- Check if we know about a room + +**Usage:** `admin rooms exists ` + +###### **Arguments:** + +* `` + + + +## `admin federation` + +- Commands for managing federation + +**Usage:** `admin federation ` + +###### **Subcommands:** + +* `incoming-federation` — - List all rooms we are currently handling an incoming pdu from +* `disable-room` — - Disables incoming federation handling for a room +* `enable-room` — - Enables incoming federation handling for a room again +* `fetch-support-well-known` — - Fetch `/.well-known/matrix/support` from the specified server +* `remote-user-in-rooms` — - Lists all the rooms we share/track with the specified *remote* user + + + +## `admin federation incoming-federation` + +- List all rooms we are currently handling an incoming pdu from + +**Usage:** `admin federation incoming-federation` + + + +## `admin federation disable-room` + +- Disables incoming federation handling for a room + +**Usage:** `admin federation disable-room ` + +###### **Arguments:** + +* `` + + + +## `admin federation enable-room` + +- Enables incoming federation handling for a room again + +**Usage:** `admin federation enable-room ` + +###### 
**Arguments:** + +* `` + + + +## `admin federation fetch-support-well-known` + +- Fetch `/.well-known/matrix/support` from the specified server + +Despite the name, this is not a federation endpoint and does not go through the federation / server resolution process as per-spec this is supposed to be served at the server_name. + +Respecting homeservers put this file here for listing administration, moderation, and security inquiries. This command provides a way to easily fetch that information. + +**Usage:** `admin federation fetch-support-well-known ` + +###### **Arguments:** + +* `` + + + +## `admin federation remote-user-in-rooms` + +- Lists all the rooms we share/track with the specified *remote* user + +**Usage:** `admin federation remote-user-in-rooms ` + +###### **Arguments:** + +* `` + + + +## `admin server` + +- Commands for managing the server + +**Usage:** `admin server ` + +###### **Subcommands:** + +* `uptime` — - Time elapsed since startup +* `show-config` — - Show configuration values +* `reload-config` — - Reload configuration values +* `list-features` — - List the features built into the server +* `memory-usage` — - Print database memory usage statistics +* `clear-caches` — - Clears all of Continuwuity's caches +* `backup-database` — - Performs an online backup of the database (only available for RocksDB at the moment) +* `list-backups` — - List database backups +* `admin-notice` — - Send a message to the admin room +* `reload-mods` — - Hot-reload the server +* `restart` — - Restart the server +* `shutdown` — - Shutdown the server + + + +## `admin server uptime` + +- Time elapsed since startup + +**Usage:** `admin server uptime` + + + +## `admin server show-config` + +- Show configuration values + +**Usage:** `admin server show-config` + + + +## `admin server reload-config` + +- Reload configuration values + +**Usage:** `admin server reload-config [PATH]` + +###### **Arguments:** + +* `` + + + +## `admin server list-features` + +- List the features 
built into the server + +**Usage:** `admin server list-features [OPTIONS]` + +###### **Options:** + +* `-a`, `--available` +* `-e`, `--enabled` +* `-c`, `--comma` + + + +## `admin server memory-usage` + +- Print database memory usage statistics + +**Usage:** `admin server memory-usage` + + + +## `admin server clear-caches` + +- Clears all of Continuwuity's caches + +**Usage:** `admin server clear-caches` + + + +## `admin server backup-database` + +- Performs an online backup of the database (only available for RocksDB at the moment) + +**Usage:** `admin server backup-database` + + + +## `admin server list-backups` + +- List database backups + +**Usage:** `admin server list-backups` + + + +## `admin server admin-notice` + +- Send a message to the admin room + +**Usage:** `admin server admin-notice [MESSAGE]...` + +###### **Arguments:** + +* `` + + + +## `admin server reload-mods` + +- Hot-reload the server + +**Usage:** `admin server reload-mods` + + + +## `admin server restart` + +- Restart the server + +**Usage:** `admin server restart [OPTIONS]` + +###### **Options:** + +* `-f`, `--force` + + + +## `admin server shutdown` + +- Shutdown the server + +**Usage:** `admin server shutdown` + + + +## `admin media` + +- Commands for managing media + +**Usage:** `admin media ` + +###### **Subcommands:** + +* `delete` — - Deletes a single media file from our database and on the filesystem via a single MXC URL or event ID (not redacted) +* `delete-list` — - Deletes a codeblock list of MXC URLs from our database and on the filesystem. This will always ignore errors +* `delete-past-remote-media` — - Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default +* `delete-all-from-user` — - Deletes all the local media from a local user on our server. 
This will always ignore errors by default +* `delete-all-from-server` — - Deletes all remote media from the specified remote server. This will always ignore errors by default +* `get-file-info` — +* `get-remote-file` — +* `get-remote-thumbnail` — + + + +## `admin media delete` + +- Deletes a single media file from our database and on the filesystem via a single MXC URL or event ID (not redacted) + +**Usage:** `admin media delete [OPTIONS]` + +###### **Options:** + +* `--mxc ` — The MXC URL to delete +* `--event-id ` — - The message event ID which contains the media and thumbnail MXC URLs + + + +## `admin media delete-list` + +- Deletes a codeblock list of MXC URLs from our database and on the filesystem. This will always ignore errors + +**Usage:** `admin media delete-list` + + + +## `admin media delete-past-remote-media` + +- Deletes all remote (and optionally local) media created before or after [duration] time using filesystem metadata first created at date, or fallback to last modified date. This will always ignore errors by default + +**Usage:** `admin media delete-past-remote-media [OPTIONS] ` + +###### **Arguments:** + +* `` — - The relative time (e.g. 30s, 5m, 7d) within which to search + +###### **Options:** + +* `-b`, `--before` — - Only delete media created before [duration] ago +* `-a`, `--after` — - Only delete media created after [duration] ago +* `--yes-i-want-to-delete-local-media` — - Long argument to additionally delete local media + + + +## `admin media delete-all-from-user` + +- Deletes all the local media from a local user on our server. This will always ignore errors by default + +**Usage:** `admin media delete-all-from-user ` + +###### **Arguments:** + +* `` + + + +## `admin media delete-all-from-server` + +- Deletes all remote media from the specified remote server. 
This will always ignore errors by default + +**Usage:** `admin media delete-all-from-server [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--yes-i-want-to-delete-local-media` — Long argument to delete local media + + + +## `admin media get-file-info` + +**Usage:** `admin media get-file-info ` + +###### **Arguments:** + +* `` — The MXC URL to lookup info for + + + +## `admin media get-remote-file` + +**Usage:** `admin media get-remote-file [OPTIONS] ` + +###### **Arguments:** + +* `` — The MXC URL to fetch + +###### **Options:** + +* `-s`, `--server ` +* `-t`, `--timeout ` + + Default value: `10000` + + + +## `admin media get-remote-thumbnail` + +**Usage:** `admin media get-remote-thumbnail [OPTIONS] ` + +###### **Arguments:** + +* `` — The MXC URL to fetch + +###### **Options:** + +* `-s`, `--server ` +* `-t`, `--timeout ` + + Default value: `10000` +* `--width ` + + Default value: `800` +* `--height ` + + Default value: `800` + + + +## `admin check` + +- Commands for checking integrity + +**Usage:** `admin check ` + +###### **Subcommands:** + +* `check-all-users` — + + + +## `admin check check-all-users` + +**Usage:** `admin check check-all-users` + + + +## `admin debug` + +- Commands for debugging things + +**Usage:** `admin debug ` + +###### **Subcommands:** + +* `echo` — - Echo input of admin command +* `get-auth-chain` — - Get the auth_chain of a PDU +* `parse-pdu` — - Parse and print a PDU from a JSON +* `get-pdu` — - Retrieve and print a PDU by EventID from the Continuwuity database +* `get-short-pdu` — - Retrieve and print a PDU by PduId from the Continuwuity database +* `get-remote-pdu` — - Attempts to retrieve a PDU from a remote server. 
Inserts it into our database/timeline if found and we do not have this PDU already (following normal event auth rules, handles it as an incoming PDU) +* `get-remote-pdu-list` — - Same as `get-remote-pdu` but accepts a codeblock newline delimited list of PDUs and a single server to fetch from +* `get-room-state` — - Gets all the room state events for the specified room +* `get-signing-keys` — - Get and display signing keys from local cache or remote server +* `get-verify-keys` — - Get and display signing keys from local cache or remote server +* `ping` — - Sends a federation request to the remote server's `/_matrix/federation/v1/version` endpoint and measures the latency it took for the server to respond +* `force-device-list-updates` — - Forces device lists for all local and remote users to be updated (as having new keys available) +* `change-log-level` — - Change tracing log level/filter on the fly +* `sign-json` — - Sign JSON blob +* `verify-json` — - Verify JSON signatures +* `verify-pdu` — - Verify PDU +* `first-pdu-in-room` — - Prints the very first PDU in the specified room (typically m.room.create) +* `latest-pdu-in-room` — - Prints the latest ("last") PDU in the specified room (typically a message) +* `force-set-room-state-from-server` — - Forcefully replaces the room state of our local copy of the specified room, with the copy (auth chain and room state events) the specified remote server says +* `resolve-true-destination` — - Runs a server name through Continuwuity's true destination resolution process +* `memory-stats` — - Print extended memory usage +* `runtime-metrics` — - Print general tokio runtime metric totals +* `runtime-interval` — - Print detailed tokio runtime metrics accumulated since last command invocation +* `time` — - Print the current time +* `list-dependencies` — - List dependencies +* `database-stats` — - Get database statistics +* `trim-memory` — - Trim memory usage +* `database-files` — - List database files + + + +## `admin debug 
echo` + +- Echo input of admin command + +**Usage:** `admin debug echo [MESSAGE]...` + +###### **Arguments:** + +* `` + + + +## `admin debug get-auth-chain` + +- Get the auth_chain of a PDU + +**Usage:** `admin debug get-auth-chain ` + +###### **Arguments:** + +* `` — An event ID (the $ character followed by the base64 reference hash) + + + +## `admin debug parse-pdu` + +- Parse and print a PDU from a JSON + +The PDU event is only checked for validity and is not added to the database. + +This command needs a JSON blob provided in a Markdown code block below the command. + +**Usage:** `admin debug parse-pdu` + + + +## `admin debug get-pdu` + +- Retrieve and print a PDU by EventID from the Continuwuity database + +**Usage:** `admin debug get-pdu ` + +###### **Arguments:** + +* `` — An event ID (a $ followed by the base64 reference hash) + + + +## `admin debug get-short-pdu` + +- Retrieve and print a PDU by PduId from the Continuwuity database + +**Usage:** `admin debug get-short-pdu ` + +###### **Arguments:** + +* `` — Shortroomid integer +* `` — Shorteventid integer + + + +## `admin debug get-remote-pdu` + +- Attempts to retrieve a PDU from a remote server. 
Inserts it into our database/timeline if found and we do not have this PDU already (following normal event auth rules, handles it as an incoming PDU) + +**Usage:** `admin debug get-remote-pdu ` + +###### **Arguments:** + +* `` — An event ID (a $ followed by the base64 reference hash) +* `` — Argument for us to attempt to fetch the event from the specified remote server + + + +## `admin debug get-remote-pdu-list` + +- Same as `get-remote-pdu` but accepts a codeblock newline delimited list of PDUs and a single server to fetch from + +**Usage:** `admin debug get-remote-pdu-list [OPTIONS] ` + +###### **Arguments:** + +* `` — Argument for us to attempt to fetch all the events from the specified remote server + +###### **Options:** + +* `-f`, `--force` — If set, ignores errors, else stops at the first error/failure + + + +## `admin debug get-room-state` + +- Gets all the room state events for the specified room. + +This is functionally equivalent to `GET /_matrix/client/v3/rooms/{roomid}/state`, except the admin command does *not* check if the sender user is allowed to see state events. This is done because it's implied that server admins here have database access and can see/get room info themselves anyways if they were malicious admins. + +Of course the check is still done on the actual client API. 
+ +**Usage:** `admin debug get-room-state ` + +###### **Arguments:** + +* `` — Room ID + + + +## `admin debug get-signing-keys` + +- Get and display signing keys from local cache or remote server + +**Usage:** `admin debug get-signing-keys [OPTIONS] [SERVER_NAME]` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--notary ` +* `-q`, `--query` + + + +## `admin debug get-verify-keys` + +- Get and display signing keys from local cache or remote server + +**Usage:** `admin debug get-verify-keys [SERVER_NAME]` + +###### **Arguments:** + +* `` + + + +## `admin debug ping` + +- Sends a federation request to the remote server's `/_matrix/federation/v1/version` endpoint and measures the latency it took for the server to respond + +**Usage:** `admin debug ping ` + +###### **Arguments:** + +* `` + + + +## `admin debug force-device-list-updates` + +- Forces device lists for all local and remote users to be updated (as having new keys available) + +**Usage:** `admin debug force-device-list-updates` + + + +## `admin debug change-log-level` + +- Change tracing log level/filter on the fly + +This accepts the same format as the `log` config option. + +**Usage:** `admin debug change-log-level [OPTIONS] [FILTER]` + +###### **Arguments:** + +* `` — Log level/filter + +###### **Options:** + +* `-r`, `--reset` — Resets the log level/filter to the one in your config + + + +## `admin debug sign-json` + +- Sign JSON blob + +This command needs a JSON blob provided in a Markdown code block below the command. + +**Usage:** `admin debug sign-json` + + + +## `admin debug verify-json` + +- Verify JSON signatures + +This command needs a JSON blob provided in a Markdown code block below the command. + +**Usage:** `admin debug verify-json` + + + +## `admin debug verify-pdu` + +- Verify PDU + +This re-verifies a PDU existing in the database found by ID. 
+ +**Usage:** `admin debug verify-pdu ` + +###### **Arguments:** + +* `` + + + +## `admin debug first-pdu-in-room` + +- Prints the very first PDU in the specified room (typically m.room.create) + +**Usage:** `admin debug first-pdu-in-room ` + +###### **Arguments:** + +* `` — The room ID + + + +## `admin debug latest-pdu-in-room` + +- Prints the latest ("last") PDU in the specified room (typically a message) + +**Usage:** `admin debug latest-pdu-in-room ` + +###### **Arguments:** + +* `` — The room ID + + + +## `admin debug force-set-room-state-from-server` + +- Forcefully replaces the room state of our local copy of the specified room, with the copy (auth chain and room state events) the specified remote server says. + +A common desire for room deletion is to simply "reset" our copy of the room. While this admin command is not a replacement for that, if you know you have split/broken room state and you know another server in the room that has the best/working room state, this command can let you use their room state. Such example is your server saying users are in a room, but other servers are saying they're not in the room in question. + +This command will get the latest PDU in the room we know about, and request the room state at that point in time via `/_matrix/federation/v1/state/{roomId}`. + +**Usage:** `admin debug force-set-room-state-from-server [EVENT_ID]` + +###### **Arguments:** + +* `` — The impacted room ID +* `` — The server we will use to query the room state for +* `` — The event ID of the latest known PDU in the room. 
Will be found automatically if not provided + + + +## `admin debug resolve-true-destination` + +- Runs a server name through Continuwuity's true destination resolution process + +Useful for debugging well-known issues + +**Usage:** `admin debug resolve-true-destination [OPTIONS] ` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `-n`, `--no-cache` + + + +## `admin debug memory-stats` + +- Print extended memory usage + +Optional argument is a character mask (a sequence of characters in any order) which enable additional extended statistics. Known characters are "abdeglmx". For convenience, a '*' will enable everything. + +**Usage:** `admin debug memory-stats [OPTS]` + +###### **Arguments:** + +* `` + + + +## `admin debug runtime-metrics` + +- Print general tokio runtime metric totals + +**Usage:** `admin debug runtime-metrics` + + + +## `admin debug runtime-interval` + +- Print detailed tokio runtime metrics accumulated since last command invocation + +**Usage:** `admin debug runtime-interval` + + + +## `admin debug time` + +- Print the current time + +**Usage:** `admin debug time` + + + +## `admin debug list-dependencies` + +- List dependencies + +**Usage:** `admin debug list-dependencies [OPTIONS]` + +###### **Options:** + +* `-n`, `--names` + + + +## `admin debug database-stats` + +- Get database statistics + +**Usage:** `admin debug database-stats [OPTIONS] [PROPERTY]` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `-m`, `--map ` + + + +## `admin debug trim-memory` + +- Trim memory usage + +**Usage:** `admin debug trim-memory` + + + +## `admin debug database-files` + +- List database files + +**Usage:** `admin debug database-files [OPTIONS] [MAP]` + +###### **Arguments:** + +* `` + +###### **Options:** + +* `--level ` + + + +## `admin query` + +- Low-level queries for database getters and iterators + +**Usage:** `admin query ` + +###### **Subcommands:** + +* `account-data` — - account_data.rs iterators and getters +* `appservice` — - 
appservice.rs iterators and getters +* `presence` — - presence.rs iterators and getters +* `room-alias` — - rooms/alias.rs iterators and getters +* `room-state-cache` — - rooms/state_cache iterators and getters +* `room-timeline` — - rooms/timeline iterators and getters +* `globals` — - globals.rs iterators and getters +* `sending` — - sending.rs iterators and getters +* `users` — - users.rs iterators and getters +* `resolver` — - resolver service +* `pusher` — - pusher service +* `short` — - short service +* `raw` — - raw service + + + +## `admin query account-data` + +- account_data.rs iterators and getters + +**Usage:** `admin query account-data ` + +###### **Subcommands:** + +* `changes-since` — - Returns all changes to the account data that happened after `since` +* `account-data-get` — - Searches the account data for a specific kind + + + +## `admin query account-data changes-since` + +- Returns all changes to the account data that happened after `since` + +**Usage:** `admin query account-data changes-since [ROOM_ID]` + +###### **Arguments:** + +* `` — Full user ID +* `` — UNIX timestamp since (u64) +* `` — Optional room ID of the account data + + + +## `admin query account-data account-data-get` + +- Searches the account data for a specific kind + +**Usage:** `admin query account-data account-data-get [ROOM_ID]` + +###### **Arguments:** + +* `` — Full user ID +* `` — Account data event type +* `` — Optional room ID of the account data + + + +## `admin query appservice` + +- appservice.rs iterators and getters + +**Usage:** `admin query appservice ` + +###### **Subcommands:** + +* `get-registration` — - Gets the appservice registration info/details from the ID as a string +* `all` — - Gets all appservice registrations with their ID and registration info + + + +## `admin query appservice get-registration` + +- Gets the appservice registration info/details from the ID as a string + +**Usage:** `admin query appservice get-registration ` + +###### **Arguments:** 
+ +* `` — Appservice registration ID + + + +## `admin query appservice all` + +- Gets all appservice registrations with their ID and registration info + +**Usage:** `admin query appservice all` + + + +## `admin query presence` + +- presence.rs iterators and getters + +**Usage:** `admin query presence ` + +###### **Subcommands:** + +* `get-presence` — - Returns the latest presence event for the given user +* `presence-since` — - Iterator of the most recent presence updates that happened after the event with id `since` + + + +## `admin query presence get-presence` + +- Returns the latest presence event for the given user + +**Usage:** `admin query presence get-presence ` + +###### **Arguments:** + +* `` — Full user ID + + + +## `admin query presence presence-since` + +- Iterator of the most recent presence updates that happened after the event with id `since` + +**Usage:** `admin query presence presence-since ` + +###### **Arguments:** + +* `` — UNIX timestamp since (u64) + + + +## `admin query room-alias` + +- rooms/alias.rs iterators and getters + +**Usage:** `admin query room-alias ` + +###### **Subcommands:** + +* `resolve-local-alias` — +* `local-aliases-for-room` — - Iterator of all our local room aliases for the room ID +* `all-local-aliases` — - Iterator of all our local aliases in our database with their room IDs + + + +## `admin query room-alias resolve-local-alias` + +**Usage:** `admin query room-alias resolve-local-alias ` + +###### **Arguments:** + +* `` — Full room alias + + + +## `admin query room-alias local-aliases-for-room` + +- Iterator of all our local room aliases for the room ID + +**Usage:** `admin query room-alias local-aliases-for-room ` + +###### **Arguments:** + +* `` — Full room ID + + + +## `admin query room-alias all-local-aliases` + +- Iterator of all our local aliases in our database with their room IDs + +**Usage:** `admin query room-alias all-local-aliases` + + + +## `admin query room-state-cache` + +- rooms/state_cache iterators and 
getters + +**Usage:** `admin query room-state-cache ` + +###### **Subcommands:** + +* `server-in-room` — +* `room-servers` — +* `server-rooms` — +* `room-members` — +* `local-users-in-room` — +* `active-local-users-in-room` — +* `room-joined-count` — +* `room-invited-count` — +* `room-user-once-joined` — +* `room-members-invited` — +* `get-invite-count` — +* `get-left-count` — +* `rooms-joined` — +* `rooms-left` — +* `rooms-invited` — +* `invite-state` — + + + +## `admin query room-state-cache server-in-room` + +**Usage:** `admin query room-state-cache server-in-room ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query room-state-cache room-servers` + +**Usage:** `admin query room-state-cache room-servers ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache server-rooms` + +**Usage:** `admin query room-state-cache server-rooms ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache room-members` + +**Usage:** `admin query room-state-cache room-members ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache local-users-in-room` + +**Usage:** `admin query room-state-cache local-users-in-room ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache active-local-users-in-room` + +**Usage:** `admin query room-state-cache active-local-users-in-room ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache room-joined-count` + +**Usage:** `admin query room-state-cache room-joined-count ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache room-invited-count` + +**Usage:** `admin query room-state-cache room-invited-count ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache room-user-once-joined` + +**Usage:** `admin query room-state-cache room-user-once-joined ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache room-members-invited` + +**Usage:** `admin query room-state-cache room-members-invited ` + 
+###### **Arguments:** + +* `` + + + +## `admin query room-state-cache get-invite-count` + +**Usage:** `admin query room-state-cache get-invite-count ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query room-state-cache get-left-count` + +**Usage:** `admin query room-state-cache get-left-count ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query room-state-cache rooms-joined` + +**Usage:** `admin query room-state-cache rooms-joined ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache rooms-left` + +**Usage:** `admin query room-state-cache rooms-left ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache rooms-invited` + +**Usage:** `admin query room-state-cache rooms-invited ` + +###### **Arguments:** + +* `` + + + +## `admin query room-state-cache invite-state` + +**Usage:** `admin query room-state-cache invite-state ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query room-timeline` + +- rooms/timeline iterators and getters + +**Usage:** `admin query room-timeline ` + +###### **Subcommands:** + +* `pdus` — +* `last` — + + + +## `admin query room-timeline pdus` + +**Usage:** `admin query room-timeline pdus [OPTIONS] [FROM]` + +###### **Arguments:** + +* `` +* `` + +###### **Options:** + +* `-l`, `--limit ` + + + +## `admin query room-timeline last` + +**Usage:** `admin query room-timeline last ` + +###### **Arguments:** + +* `` + + + +## `admin query globals` + +- globals.rs iterators and getters + +**Usage:** `admin query globals ` + +###### **Subcommands:** + +* `database-version` — +* `current-count` — +* `last-check-for-announcements-id` — +* `signing-keys-for` — - This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server + + + +## `admin query globals database-version` + +**Usage:** `admin query globals database-version` + + + +## `admin query globals current-count` + +**Usage:** `admin query globals current-count` + + + +## `admin query globals 
last-check-for-announcements-id` + +**Usage:** `admin query globals last-check-for-announcements-id` + + + +## `admin query globals signing-keys-for` + +- This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server + +**Usage:** `admin query globals signing-keys-for ` + +###### **Arguments:** + +* `` + + + +## `admin query sending` + +- sending.rs iterators and getters + +**Usage:** `admin query sending ` + +###### **Subcommands:** + +* `active-requests` — - Queries database for all `servercurrentevent_data` +* `active-requests-for` — - Queries database for `servercurrentevent_data` but for a specific destination +* `queued-requests` — - Queries database for `servernameevent_data` which are the queued up requests that will eventually be sent +* `get-latest-edu-count` — + + + +## `admin query sending active-requests` + +- Queries database for all `servercurrentevent_data` + +**Usage:** `admin query sending active-requests` + + + +## `admin query sending active-requests-for` + +- Queries database for `servercurrentevent_data` but for a specific destination + +This command takes only *one* format of these arguments: + +appservice_id server_name user_id AND push_key + +See src/service/sending/mod.rs for the definition of the `Destination` enum + +**Usage:** `admin query sending active-requests-for [OPTIONS]` + +###### **Options:** + +* `-a`, `--appservice-id ` +* `-s`, `--server-name ` +* `-u`, `--user-id ` +* `-p`, `--push-key ` + + + +## `admin query sending queued-requests` + +- Queries database for `servernameevent_data` which are the queued up requests that will eventually be sent + +This command takes only *one* format of these arguments: + +appservice_id server_name user_id AND push_key + +See src/service/sending/mod.rs for the definition of the `Destination` enum + +**Usage:** `admin query sending queued-requests [OPTIONS]` + +###### **Options:** + +* `-a`, `--appservice-id ` +* `-s`, `--server-name ` +* `-u`, `--user-id ` +* `-p`, 
`--push-key ` + + + +## `admin query sending get-latest-edu-count` + +**Usage:** `admin query sending get-latest-edu-count ` + +###### **Arguments:** + +* `` + + + +## `admin query users` + +- users.rs iterators and getters + +**Usage:** `admin query users ` + +###### **Subcommands:** + +* `count-users` — +* `iter-users` — +* `iter-users2` — +* `password-hash` — +* `list-devices` — +* `list-devices-metadata` — +* `get-device-metadata` — +* `get-devices-version` — +* `count-one-time-keys` — +* `get-device-keys` — +* `get-user-signing-key` — +* `get-master-key` — +* `get-to-device-events` — +* `get-latest-backup` — +* `get-latest-backup-version` — +* `get-backup-algorithm` — +* `get-all-backups` — +* `get-room-backups` — +* `get-backup-session` — +* `get-shared-rooms` — + + + +## `admin query users count-users` + +**Usage:** `admin query users count-users` + + + +## `admin query users iter-users` + +**Usage:** `admin query users iter-users` + + + +## `admin query users iter-users2` + +**Usage:** `admin query users iter-users2` + + + +## `admin query users password-hash` + +**Usage:** `admin query users password-hash ` + +###### **Arguments:** + +* `` + + + +## `admin query users list-devices` + +**Usage:** `admin query users list-devices ` + +###### **Arguments:** + +* `` + + + +## `admin query users list-devices-metadata` + +**Usage:** `admin query users list-devices-metadata ` + +###### **Arguments:** + +* `` + + + +## `admin query users get-device-metadata` + +**Usage:** `admin query users get-device-metadata ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query users get-devices-version` + +**Usage:** `admin query users get-devices-version ` + +###### **Arguments:** + +* `` + + + +## `admin query users count-one-time-keys` + +**Usage:** `admin query users count-one-time-keys ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query users get-device-keys` + +**Usage:** `admin query users get-device-keys ` + +###### **Arguments:** + +* `` +* `` + + 
+ +## `admin query users get-user-signing-key` + +**Usage:** `admin query users get-user-signing-key ` + +###### **Arguments:** + +* `` + + + +## `admin query users get-master-key` + +**Usage:** `admin query users get-master-key ` + +###### **Arguments:** + +* `` + + + +## `admin query users get-to-device-events` + +**Usage:** `admin query users get-to-device-events ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query users get-latest-backup` + +**Usage:** `admin query users get-latest-backup ` + +###### **Arguments:** + +* `` + + + +## `admin query users get-latest-backup-version` + +**Usage:** `admin query users get-latest-backup-version ` + +###### **Arguments:** + +* `` + + + +## `admin query users get-backup-algorithm` + +**Usage:** `admin query users get-backup-algorithm ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query users get-all-backups` + +**Usage:** `admin query users get-all-backups ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query users get-room-backups` + +**Usage:** `admin query users get-room-backups ` + +###### **Arguments:** + +* `` +* `` +* `` + + + +## `admin query users get-backup-session` + +**Usage:** `admin query users get-backup-session ` + +###### **Arguments:** + +* `` +* `` +* `` +* `` + + + +## `admin query users get-shared-rooms` + +**Usage:** `admin query users get-shared-rooms ` + +###### **Arguments:** + +* `` +* `` + + + +## `admin query resolver` + +- resolver service + +**Usage:** `admin query resolver ` + +###### **Subcommands:** + +* `destinations-cache` — Query the destinations cache +* `overrides-cache` — Query the overrides cache + + + +## `admin query resolver destinations-cache` + +Query the destinations cache + +**Usage:** `admin query resolver destinations-cache [SERVER_NAME]` + +###### **Arguments:** + +* `` + + + +## `admin query resolver overrides-cache` + +Query the overrides cache + +**Usage:** `admin query resolver overrides-cache [NAME]` + +###### **Arguments:** + +* `` + + 
+ +## `admin query pusher` + +- pusher service + +**Usage:** `admin query pusher ` + +###### **Subcommands:** + +* `get-pushers` — - Returns all the pushers for the user + + + +## `admin query pusher get-pushers` + +- Returns all the pushers for the user + +**Usage:** `admin query pusher get-pushers ` + +###### **Arguments:** + +* `` — Full user ID + + + +## `admin query short` + +- short service + +**Usage:** `admin query short ` + +###### **Subcommands:** + +* `short-event-id` — +* `short-room-id` — + + + +## `admin query short short-event-id` + +**Usage:** `admin query short short-event-id ` + +###### **Arguments:** + +* `` + + + +## `admin query short short-room-id` + +**Usage:** `admin query short short-room-id ` + +###### **Arguments:** + +* `` + + + +## `admin query raw` + +- raw service + +**Usage:** `admin query raw ` + +###### **Subcommands:** + +* `raw-maps` — - List database maps +* `raw-get` — - Raw database query +* `raw-del` — - Raw database delete (for string keys) +* `raw-keys` — - Raw database keys iteration +* `raw-keys-sizes` — - Raw database key size breakdown +* `raw-keys-total` — - Raw database keys total bytes +* `raw-vals-sizes` — - Raw database values size breakdown +* `raw-vals-total` — - Raw database values total bytes +* `raw-iter` — - Raw database items iteration +* `raw-keys-from` — - Raw database keys iteration +* `raw-iter-from` — - Raw database items iteration +* `raw-count` — - Raw database record count +* `compact` — - Compact database + + + +## `admin query raw raw-maps` + +- List database maps + +**Usage:** `admin query raw raw-maps` + + + +## `admin query raw raw-get` + +- Raw database query + +**Usage:** `admin query raw raw-get ` + +###### **Arguments:** + +* `` — Map name +* `` — Key + + + +## `admin query raw raw-del` + +- Raw database delete (for string keys) + +**Usage:** `admin query raw raw-del ` + +###### **Arguments:** + +* `` — Map name +* `` — Key + + + +## `admin query raw raw-keys` + +- Raw database keys 
iteration + +**Usage:** `admin query raw raw-keys [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-keys-sizes` + +- Raw database key size breakdown + +**Usage:** `admin query raw raw-keys-sizes [MAP] [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-keys-total` + +- Raw database keys total bytes + +**Usage:** `admin query raw raw-keys-total [MAP] [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-vals-sizes` + +- Raw database values size breakdown + +**Usage:** `admin query raw raw-vals-sizes [MAP] [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-vals-total` + +- Raw database values total bytes + +**Usage:** `admin query raw raw-vals-total [MAP] [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-iter` + +- Raw database items iteration + +**Usage:** `admin query raw raw-iter [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw raw-keys-from` + +- Raw database keys iteration + +**Usage:** `admin query raw raw-keys-from [OPTIONS] ` + +###### **Arguments:** + +* `` — Map name +* `` — Lower-bound + +###### **Options:** + +* `-l`, `--limit ` — Limit + + + +## `admin query raw raw-iter-from` + +- Raw database items iteration + +**Usage:** `admin query raw raw-iter-from [OPTIONS] ` + +###### **Arguments:** + +* `` — Map name +* `` — Lower-bound + +###### **Options:** + +* `-l`, `--limit ` — Limit + + + +## `admin query raw raw-count` + +- Raw database record count + +**Usage:** `admin query raw raw-count [MAP] [PREFIX]` + +###### **Arguments:** + +* `` — Map name +* `` — Key prefix + + + +## `admin query raw compact` + +- Compact database + +**Usage:** `admin query raw compact [OPTIONS]` + +###### **Options:** + +* `-m`, `--map ` +* `--start ` +* `--stop ` +* 
`--from ` +* `--into ` +* `--parallelism ` — There is one compaction job per column; then this controls how many columns are compacted in parallel. If zero, one compaction job is still run at a time here, but in exclusive-mode blocking any other automatic compaction jobs until complete +* `--exhaustive` + + Default value: `false` diff --git a/src/admin/admin.rs b/src/admin/admin.rs index 50b9db7c..e6479960 100644 --- a/src/admin/admin.rs +++ b/src/admin/admin.rs @@ -10,7 +10,7 @@ use crate::{ #[derive(Debug, Parser)] #[command(name = conduwuit_core::name(), version = conduwuit_core::version())] -pub(super) enum AdminCommand { +pub enum AdminCommand { #[command(subcommand)] /// - Commands for managing appservices Appservices(AppserviceCommand), diff --git a/src/admin/appservice/mod.rs b/src/admin/appservice/mod.rs index 2e0694aa..5e2712a9 100644 --- a/src/admin/appservice/mod.rs +++ b/src/admin/appservice/mod.rs @@ -7,7 +7,7 @@ use crate::admin_command_dispatch; #[derive(Debug, Subcommand)] #[admin_command_dispatch] -pub(super) enum AppserviceCommand { +pub enum AppserviceCommand { /// - Register an appservice using its registration YAML /// /// This command needs a YAML generated by an appservice (such as a bridge), diff --git a/src/admin/check/mod.rs b/src/admin/check/mod.rs index 30b335c4..a15968a7 100644 --- a/src/admin/check/mod.rs +++ b/src/admin/check/mod.rs @@ -7,6 +7,6 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum CheckCommand { +pub enum CheckCommand { CheckAllUsers, } diff --git a/src/admin/debug/mod.rs b/src/admin/debug/mod.rs index fb8a3002..7a0769ab 100644 --- a/src/admin/debug/mod.rs +++ b/src/admin/debug/mod.rs @@ -11,7 +11,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum DebugCommand { +pub enum DebugCommand { /// - Echo input of admin command Echo { message: Vec, diff --git a/src/admin/debug/tester.rs 
b/src/admin/debug/tester.rs index 0a2b1516..75da382b 100644 --- a/src/admin/debug/tester.rs +++ b/src/admin/debug/tester.rs @@ -4,7 +4,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, clap::Subcommand)] -pub(crate) enum TesterCommand { +pub enum TesterCommand { Panic, Failure, Tester, diff --git a/src/admin/federation/mod.rs b/src/admin/federation/mod.rs index 2c539adc..48f79a56 100644 --- a/src/admin/federation/mod.rs +++ b/src/admin/federation/mod.rs @@ -8,7 +8,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum FederationCommand { +pub enum FederationCommand { /// - List all rooms we are currently handling an incoming pdu from IncomingFederation, diff --git a/src/admin/media/mod.rs b/src/admin/media/mod.rs index d1e6cd3a..66d49959 100644 --- a/src/admin/media/mod.rs +++ b/src/admin/media/mod.rs @@ -9,7 +9,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum MediaCommand { +pub enum MediaCommand { /// - Deletes a single media file from our database and on the filesystem /// via a single MXC URL or event ID (not redacted) Delete { @@ -90,10 +90,10 @@ pub(super) enum MediaCommand { #[arg(short, long, default_value("10000"))] timeout: u32, - #[arg(short, long, default_value("800"))] + #[arg(long, default_value("800"))] width: u32, - #[arg(short, long, default_value("800"))] + #[arg(long, default_value("800"))] height: u32, }, } diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 1f777fa9..732b8ce0 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -33,6 +33,8 @@ conduwuit::mod_ctor! {} conduwuit::mod_dtor! {} conduwuit::rustc_flags_capture! 
{} +pub use crate::admin::AdminCommand; + /// Install the admin command processor pub async fn init(admin_service: &service::admin::Service) { _ = admin_service diff --git a/src/admin/query/account_data.rs b/src/admin/query/account_data.rs index 228d2120..2e044cef 100644 --- a/src/admin/query/account_data.rs +++ b/src/admin/query/account_data.rs @@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/account_data.rs -pub(crate) enum AccountDataCommand { +pub enum AccountDataCommand { /// - Returns all changes to the account data that happened after `since`. ChangesSince { /// Full user ID diff --git a/src/admin/query/appservice.rs b/src/admin/query/appservice.rs index 28bf6451..f9f15c25 100644 --- a/src/admin/query/appservice.rs +++ b/src/admin/query/appservice.rs @@ -6,7 +6,7 @@ use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/appservice.rs -pub(crate) enum AppserviceCommand { +pub enum AppserviceCommand { /// - Gets the appservice registration info/details from the ID as a string GetRegistration { /// Appservice registration ID diff --git a/src/admin/query/globals.rs b/src/admin/query/globals.rs index c8c1f512..6e945145 100644 --- a/src/admin/query/globals.rs +++ b/src/admin/query/globals.rs @@ -6,7 +6,7 @@ use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/globals.rs -pub(crate) enum GlobalsCommand { +pub enum GlobalsCommand { DatabaseVersion, CurrentCount, diff --git a/src/admin/query/mod.rs b/src/admin/query/mod.rs index da27eb1d..5b93086b 100644 --- a/src/admin/query/mod.rs +++ b/src/admin/query/mod.rs @@ -27,7 +27,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// Query tables from database -pub(super) enum QueryCommand { +pub enum QueryCommand { /// - 
account_data.rs iterators and getters #[command(subcommand)] AccountData(AccountDataCommand), diff --git a/src/admin/query/presence.rs b/src/admin/query/presence.rs index 5b7ead4b..ccc3a431 100644 --- a/src/admin/query/presence.rs +++ b/src/admin/query/presence.rs @@ -7,7 +7,7 @@ use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/presence.rs -pub(crate) enum PresenceCommand { +pub enum PresenceCommand { /// - Returns the latest presence event for the given user. GetPresence { /// Full user ID diff --git a/src/admin/query/pusher.rs b/src/admin/query/pusher.rs index 0d0e6cc9..3f294017 100644 --- a/src/admin/query/pusher.rs +++ b/src/admin/query/pusher.rs @@ -5,7 +5,7 @@ use ruma::OwnedUserId; use crate::Context; #[derive(Debug, Subcommand)] -pub(crate) enum PusherCommand { +pub enum PusherCommand { /// - Returns all the pushers for the user. GetPushers { /// Full user ID diff --git a/src/admin/query/raw.rs b/src/admin/query/raw.rs index 0e248c65..5165b61a 100644 --- a/src/admin/query/raw.rs +++ b/src/admin/query/raw.rs @@ -19,7 +19,7 @@ use crate::{admin_command, admin_command_dispatch}; #[derive(Debug, Subcommand)] #[allow(clippy::enum_variant_names)] /// Query tables from database -pub(crate) enum RawCommand { +pub enum RawCommand { /// - List database maps RawMaps, diff --git a/src/admin/query/resolver.rs b/src/admin/query/resolver.rs index 4a39a40e..5b2d8d3b 100644 --- a/src/admin/query/resolver.rs +++ b/src/admin/query/resolver.rs @@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// Resolver service and caches -pub(crate) enum ResolverCommand { +pub enum ResolverCommand { /// Query the destinations cache DestinationsCache { server_name: Option, diff --git a/src/admin/query/room_alias.rs b/src/admin/query/room_alias.rs index b646beec..fac1dd0a 100644 --- a/src/admin/query/room_alias.rs +++ b/src/admin/query/room_alias.rs @@ -7,7 
+7,7 @@ use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/rooms/alias.rs -pub(crate) enum RoomAliasCommand { +pub enum RoomAliasCommand { ResolveLocalAlias { /// Full room alias alias: OwnedRoomAliasId, diff --git a/src/admin/query/room_state_cache.rs b/src/admin/query/room_state_cache.rs index c64cd173..798792d7 100644 --- a/src/admin/query/room_state_cache.rs +++ b/src/admin/query/room_state_cache.rs @@ -6,7 +6,7 @@ use ruma::{OwnedRoomId, OwnedServerName, OwnedUserId}; use crate::Context; #[derive(Debug, Subcommand)] -pub(crate) enum RoomStateCacheCommand { +pub enum RoomStateCacheCommand { ServerInRoom { server: OwnedServerName, room_id: OwnedRoomId, diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 0fd22ca7..afcfec34 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// Query tables from database -pub(crate) enum RoomTimelineCommand { +pub enum RoomTimelineCommand { Pdus { room_id: OwnedRoomOrAliasId, diff --git a/src/admin/query/sending.rs b/src/admin/query/sending.rs index 8b1676bc..b6350539 100644 --- a/src/admin/query/sending.rs +++ b/src/admin/query/sending.rs @@ -8,7 +8,7 @@ use crate::Context; #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/sending.rs -pub(crate) enum SendingCommand { +pub enum SendingCommand { /// - Queries database for all `servercurrentevent_data` ActiveRequests, diff --git a/src/admin/query/short.rs b/src/admin/query/short.rs index aa7c8666..3ebfbbcf 100644 --- a/src/admin/query/short.rs +++ b/src/admin/query/short.rs @@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// Query tables from database -pub(crate) enum ShortCommand { +pub enum ShortCommand { ShortEventId { 
event_id: OwnedEventId, }, diff --git a/src/admin/query/users.rs b/src/admin/query/users.rs index 0f34d13f..2b5b3481 100644 --- a/src/admin/query/users.rs +++ b/src/admin/query/users.rs @@ -8,7 +8,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] /// All the getters and iterators from src/database/key_value/users.rs -pub(crate) enum UsersCommand { +pub enum UsersCommand { CountUsers, IterUsers, diff --git a/src/admin/room/alias.rs b/src/admin/room/alias.rs index 6b37ffe4..80e5d297 100644 --- a/src/admin/room/alias.rs +++ b/src/admin/room/alias.rs @@ -8,7 +8,7 @@ use ruma::{OwnedRoomAliasId, OwnedRoomId}; use crate::Context; #[derive(Debug, Subcommand)] -pub(crate) enum RoomAliasCommand { +pub enum RoomAliasCommand { /// - Make an alias point to a room. Set { #[arg(short, long)] diff --git a/src/admin/room/directory.rs b/src/admin/room/directory.rs index a6be9a15..cdefc99b 100644 --- a/src/admin/room/directory.rs +++ b/src/admin/room/directory.rs @@ -6,7 +6,7 @@ use ruma::OwnedRoomId; use crate::{Context, PAGE_SIZE, get_room_info}; #[derive(Debug, Subcommand)] -pub(crate) enum RoomDirectoryCommand { +pub enum RoomDirectoryCommand { /// - Publish a room to the room directory Publish { /// The room id of the room to publish diff --git a/src/admin/room/info.rs b/src/admin/room/info.rs index 1278e820..e35ddb27 100644 --- a/src/admin/room/info.rs +++ b/src/admin/room/info.rs @@ -7,7 +7,7 @@ use crate::{admin_command, admin_command_dispatch}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(crate) enum RoomInfoCommand { +pub enum RoomInfoCommand { /// - List joined members in a room ListJoinedMembers { room_id: OwnedRoomId, diff --git a/src/admin/room/mod.rs b/src/admin/room/mod.rs index 26d2c2d8..00baf4c8 100644 --- a/src/admin/room/mod.rs +++ b/src/admin/room/mod.rs @@ -16,7 +16,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum 
RoomCommand { +pub enum RoomCommand { /// - List all rooms the server knows about #[clap(alias = "list")] ListRooms { diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index 5fb5bb3e..4d106977 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -12,7 +12,7 @@ use crate::{admin_command, admin_command_dispatch, get_room_info}; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(crate) enum RoomModerationCommand { +pub enum RoomModerationCommand { /// - Bans a room from local users joining and evicts all our local users /// (including server /// admins) diff --git a/src/admin/server/mod.rs b/src/admin/server/mod.rs index 6b99e5de..cf46d034 100644 --- a/src/admin/server/mod.rs +++ b/src/admin/server/mod.rs @@ -9,7 +9,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum ServerCommand { +pub enum ServerCommand { /// - Time elapsed since startup Uptime, diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index 645d3637..656cacaf 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -8,7 +8,7 @@ use crate::admin_command_dispatch; #[admin_command_dispatch] #[derive(Debug, Subcommand)] -pub(super) enum UserCommand { +pub enum UserCommand { /// - Create a new user #[clap(alias = "create")] CreateUser { diff --git a/src/main/Cargo.toml b/src/main/Cargo.toml index 2d8d26b5..cddf4156 100644 --- a/src/main/Cargo.toml +++ b/src/main/Cargo.toml @@ -14,6 +14,13 @@ rust-version.workspace = true version.workspace = true metadata.crane.workspace = true +[lib] +path = "mod.rs" +crate-type = [ + "rlib", +# "dylib", +] + [package.metadata.deb] name = "conduwuit" maintainer = "strawberry " diff --git a/src/main/mod.rs b/src/main/mod.rs new file mode 100644 index 00000000..ce9e3b9c --- /dev/null +++ b/src/main/mod.rs @@ -0,0 +1,14 @@ +#![type_length_limit = "49152"] //TODO: reduce me + +use conduwuit_core::rustc_flags_capture; + +pub(crate) mod clap; +mod 
logging; +mod mods; +mod restart; +mod runtime; +mod sentry; +mod server; +mod signal; + +rustc_flags_capture! {} diff --git a/xtask/generate-admin-command/Cargo.toml b/xtask/generate-admin-command/Cargo.toml new file mode 100644 index 00000000..5f27ee0c --- /dev/null +++ b/xtask/generate-admin-command/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "xtask-admin-command" +authors.workspace = true +categories.workspace = true +description.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +clap-markdown = "0.1.5" +clap_builder = { version = "4.5.38", default-features = false } +clap_mangen = "0.2" + +conduwuit-admin.workspace = true + +# Hack to prevent rebuilds +conduwuit.workspace = true + +[lints] +workspace = true diff --git a/xtask/generate-admin-command/src/main.rs b/xtask/generate-admin-command/src/main.rs new file mode 100644 index 00000000..46e7dad5 --- /dev/null +++ b/xtask/generate-admin-command/src/main.rs @@ -0,0 +1,63 @@ +use std::{ + fs::{self, File}, + io::{self, Write}, + path::Path, +}; + +use clap_builder::{Command, CommandFactory}; +use conduwuit_admin::AdminCommand; + +fn main() -> Result<(), Box> { + let mut args = std::env::args().skip(1); + let task = args.next(); + match task { + | None => todo!(), + | Some(t) => match t.as_str() { + | "man" => { + let dir = Path::new("./admin-man"); + gen_manpages(dir)?; + }, + | "md" => { + let command = AdminCommand::command().name("admin"); + + let res = clap_markdown::help_markdown_command_custom( + &command, + &clap_markdown::MarkdownOptions::default().show_footer(false), + ) + .replace("\n\r", "\n") + .replace("\r\n", "\n") + .replace(" \n", "\n"); + + let mut file = File::create(Path::new("./docs/admin_reference.md"))?; + Write::write_all(&mut file, res.trim_end().as_bytes())?; + file.write(b"\n")?; + }, 
+ | invalid => return Err(format!("Invalid task name: {invalid}").into()), + }, + } + Ok(()) +} + +fn gen_manpages(dir: &Path) -> Result<(), io::Error> { + fn r#gen(dir: &Path, c: &Command, prefix: Option<&str>) -> Result<(), io::Error> { + fs::create_dir_all(dir)?; + let sub_name = c.get_display_name().unwrap_or_else(|| c.get_name()); + let name = if let Some(prefix) = prefix { + format!("{prefix}-{sub_name}") + } else { + sub_name.to_owned() + }; + + let mut out = File::create(dir.join(format!("{name}.1")))?; + let clap_mangen = clap_mangen::Man::new(c.to_owned().disable_help_flag(true)); + clap_mangen.render(&mut out)?; + + for sub in c.get_subcommands() { + r#gen(&dir.join(sub_name), sub, Some(&name))?; + } + + Ok(()) + } + + r#gen(dir, &AdminCommand::command().name("admin"), None) +} diff --git a/xtask/main/Cargo.toml b/xtask/main/Cargo.toml new file mode 100644 index 00000000..70c0c34b --- /dev/null +++ b/xtask/main/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "xtask" +authors.workspace = true +categories.workspace = true +description.workspace = true +edition.workspace = true +homepage.workspace = true +keywords.workspace = true +license.workspace = true +readme.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[dependencies] +clap.workspace = true +# Required for working with JSON output from cargo metadata +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" + +[lints] +workspace = true diff --git a/xtask/main/src/main.rs b/xtask/main/src/main.rs new file mode 100644 index 00000000..0b244114 --- /dev/null +++ b/xtask/main/src/main.rs @@ -0,0 +1,11 @@ +use std::{env, process::Command}; + +fn main() -> Result<(), Box> { + let mut child = Command::new("cargo").args(["run", "--package", "xtask-admin-command", "--"].into_iter().map(ToOwned::to_owned).chain(env::args().skip(2))) + // .stdout(Stdio::piped()) + // .stderr(Stdio::piped()) + .spawn() + .expect("failed to execute child"); + 
child.wait()?; + Ok(()) +} From 28a29c3a7b2b01b6c26bc9e92dc4ce2d1fcf9164 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 6 Jul 2025 21:59:20 +0100 Subject: [PATCH 093/270] feat: Generate binary documentation Also refactors main.rs/mod.rs to silence clippy --- Cargo.lock | 2 +- Cargo.toml | 2 +- docs/server_reference.md | 21 +++ src/main/clap.rs | 36 +++--- src/main/main.rs | 121 +----------------- src/main/mod.rs | 119 ++++++++++++++++- src/main/server.rs | 7 +- xtask/generate-admin-command/src/main.rs | 63 --------- .../Cargo.toml | 2 +- xtask/generate-generate-commands/src/main.rs | 113 ++++++++++++++++ xtask/main/Cargo.toml | 2 +- xtask/main/src/main.rs | 2 +- 12 files changed, 281 insertions(+), 209 deletions(-) create mode 100644 docs/server_reference.md delete mode 100644 xtask/generate-admin-command/src/main.rs rename xtask/{generate-admin-command => generate-generate-commands}/Cargo.toml (94%) create mode 100644 xtask/generate-generate-commands/src/main.rs diff --git a/Cargo.lock b/Cargo.lock index fe8cb16d..d950e9da 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6129,7 +6129,7 @@ dependencies = [ ] [[package]] -name = "xtask-admin-command" +name = "xtask-generate-commands" version = "0.5.0-rc.6" dependencies = [ "clap-markdown", diff --git a/Cargo.toml b/Cargo.toml index 03c5b489..ef917332 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -768,7 +768,7 @@ inherits = "dev" # '-Clink-arg=-Wl,-z,nodlopen', # '-Clink-arg=-Wl,-z,nodelete', #] -[profile.dev.package.xtask-admin-command] +[profile.dev.package.xtask-generate-commands] inherits = "dev" [profile.dev.package.conduwuit] inherits = "dev" diff --git a/docs/server_reference.md b/docs/server_reference.md new file mode 100644 index 00000000..e34bc51e --- /dev/null +++ b/docs/server_reference.md @@ -0,0 +1,21 @@ +# Command-Line Help for `continuwuity` + +This document contains the help content for the `continuwuity` command-line program. 
+ +**Command Overview:** + +* [`continuwuity`↴](#continuwuity) + +## `continuwuity` + +a very cool Matrix chat homeserver written in Rust + +**Usage:** `continuwuity [OPTIONS]` + +###### **Options:** + +* `-c`, `--config ` — Path to the config TOML file (optional) +* `-O`, `--option