From 9b6ac6c45f44a18d68208038b360212205830a7f Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 19:57:02 +0100 Subject: [PATCH 01/52] fix: Ignore existing membership when room is disconnected --- src/api/client/membership.rs | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index e587d806..1800fa60 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -925,24 +925,32 @@ pub async fn join_room_by_id_helper( return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); } - if let Ok(membership) = services - .rooms - .state_accessor - .get_member(room_id, sender_user) - .await - { - if membership.membership == MembershipState::Ban { - debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); - return Err!(Request(Forbidden("You are banned from the room."))); - } - } - let server_in_room = services .rooms .state_cache .server_in_room(services.globals.server_name(), room_id) .await; + // Only check our known membership if we're already in the room. + // See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855 + let membership = if server_in_room { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + } else { + debug!("Ignoring local state for join {room_id}, we aren't in the room yet."); + Ok(RoomMemberEventContent::new(MembershipState::Leave)) + }; + if let Ok(m) = membership { + if m.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); + // TODO: return reason + return Err!(Request(Forbidden("You are banned from the room."))); + } + } + let local_join = server_in_room || servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); From d63c8b9fcaccb93981b0afcccb6c1c6d823d0f73 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:16:31 +0100 Subject: [PATCH 02/52] feat: Support passing through MSC4293 redact_events --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client/membership.rs | 1 + 3 files changed, 13 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7852d2ca..ab5702e8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=d6870a7fb7f6cccff63f7fd0ff6c581bad80e983#d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index f3de5d7f..d54b86a4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "d6870a7fb7f6cccff63f7fd0ff6c581bad80e983" +rev = "0012040617213eccb682b65d0ac76c0e5cc94d8c" features = [ "compat", "rand", diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 1800fa60..b05e2003 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -646,6 +646,7 @@ pub(crate) async fn ban_user_route( is_direct: None, join_authorized_via_users_server: None, third_party_invite: None, + redact_events: body.redact_events, ..current_member_content }), sender_user, From 4b5e8df95c60435fd3e6bce016229108c0494911 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:29:27 +0100 Subject: [PATCH 03/52] fix: Add missing init fields --- src/api/client/membership.rs | 1 + src/api/client/profile.rs | 2 ++ src/api/client/room/upgrade.rs | 1 + 3 files changed, 4 insertions(+) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b05e2003..145b3cde 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1824,6 +1824,7 @@ pub async fn leave_room( displayname: None, third_party_invite: None, blurhash: None, + redact_events: None, }; let is_banned = services.rooms.metadata.is_banned(room_id); diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index 3699b590..e2d1c934 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -343,6 +343,7 @@ pub async fn update_displayname( reason: None, is_direct: None, third_party_invite: None, + redact_events: None, }); Ok((pdu, room_id)) @@ -396,6 +397,7 @@ pub async fn update_avatar_url( reason: None, is_direct: None, third_party_invite: None, + redact_events: None, }); Ok((pdu, room_id)) diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index 9ec0b3bb..da5b49fe 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -189,6 +189,7 @@ pub(crate) async fn upgrade_room_route( blurhash: services.users.blurhash(sender_user).await.ok(), reason: None, join_authorized_via_users_server: None, + redact_events: None, }) .expect("event is valid, we just created it"), unsigned: None, From b4bdd1ee65dc1c0aa4ff974c13a0237542552fc2 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 13:43:27 +0100 Subject: [PATCH 04/52] chore: Update ruwuma Fixes the wrong field name being serialised --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ab5702e8..92044b92 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3798,7 +3798,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.10.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "assign", "js_int", @@ -3818,7 +3818,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.10.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3830,7 +3830,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.18.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "assign", @@ -3853,7 +3853,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "base64 0.22.1", @@ -3885,7 +3885,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.28.1" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "as_variant", "indexmap 2.9.0", @@ -3910,7 +3910,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "bytes", "headers", @@ -3932,7 +3932,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.5" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "thiserror 2.0.12", @@ -3941,7 +3941,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.9.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3951,7 +3951,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.13.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "cfg-if", "proc-macro-crate", @@ -3966,7 +3966,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.9.0" -source = 
"git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "js_int", "ruma-common", @@ -3978,7 +3978,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.15.0" -source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=0012040617213eccb682b65d0ac76c0e5cc94d8c#0012040617213eccb682b65d0ac76c0e5cc94d8c" +source = "git+https://forgejo.ellis.link/continuwuation/ruwuma?rev=9b65f83981f6f53d185ce77da37aaef9dfd764a9#9b65f83981f6f53d185ce77da37aaef9dfd764a9" dependencies = [ "base64 0.22.1", "ed25519-dalek", diff --git a/Cargo.toml b/Cargo.toml index d54b86a4..5c289adf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -350,7 +350,7 @@ version = "0.1.2" [workspace.dependencies.ruma] git = "https://forgejo.ellis.link/continuwuation/ruwuma" #branch = "conduwuit-changes" -rev = "0012040617213eccb682b65d0ac76c0e5cc94d8c" +rev = "9b65f83981f6f53d185ce77da37aaef9dfd764a9" features = [ "compat", "rand", From fac9e090cdebbd21c3b282ed04c61d0b8c86ebdd Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:32:02 +0100 Subject: [PATCH 05/52] feat: Add suspension helper to user service --- src/service/users/mod.rs | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 701561a8..1a9f6600 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -15,6 +15,7 @@ use ruma::{ AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, + uint, }; use serde_json::json; @@ -52,6 +53,7 @@ struct Data { userid_lastonetimekeyupdate: Arc, userid_masterkeyid: Arc, userid_password: Arc, + userid_suspended: Arc, userid_selfsigningkeyid: Arc, userid_usersigningkeyid: Arc, useridprofilekey_value: Arc, @@ -87,6 +89,7 @@ impl crate::Service for Service { userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), userid_masterkeyid: args.db["userid_masterkeyid"].clone(), userid_password: args.db["userid_password"].clone(), + userid_suspended: args.db["userid_suspended"].clone(), userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), useridprofilekey_value: args.db["useridprofilekey_value"].clone(), @@ -143,6 +146,16 @@ impl Service { Ok(()) } + /// Suspend account, placing it in a read-only state + pub async fn suspend_account(&self, user_id: &UserId) -> () { + self.db.userid_suspended.insert(user_id, "1"); + } + + /// Unsuspend account, placing it in a read-write state + pub async fn unsuspend_account(&self, user_id: &UserId) -> () { + self.db.userid_suspended.remove(user_id); + } + /// Check if a user has an account on this homeserver. 
#[inline] pub async fn exists(&self, user_id: &UserId) -> bool { @@ -159,6 +172,16 @@ impl Service { .await } + /// Check if account is suspended + pub async fn is_suspended(&self, user_id: &UserId) -> Result { + self.db + .userid_suspended + .get(user_id) + .map_ok(|val| val.is_empty()) + .map_err(|_| err!(Request(NotFound("User does not exist.")))) + .await + } + /// Check if account is active, infallible pub async fn is_active(&self, user_id: &UserId) -> bool { !self.is_deactivated(user_id).await.unwrap_or(true) From accfda258638d7eef9b2c35c15e2b1658a478743 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:35:58 +0100 Subject: [PATCH 06/52] feat: Prevent suspended users sending events --- src/api/client/send.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/api/client/send.rs b/src/api/client/send.rs index f753fa65..b87d1822 100644 --- a/src/api/client/send.rs +++ b/src/api/client/send.rs @@ -23,6 +23,9 @@ pub(crate) async fn send_message_event_route( let sender_user = body.sender_user(); let sender_device = body.sender_device.as_deref(); let appservice_info = body.appservice_info.as_ref(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } // Forbid m.room.encrypted if encryption is disabled if MessageLikeEventType::RoomEncrypted == body.event_type && !services.config.allow_encryption From 286974cb9a676bb58501835fbdf1fce08b3d9c6c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:37:09 +0100 Subject: [PATCH 07/52] feat: Prevent suspended users redacting events --- src/api/client/redact.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/redact.rs b/src/api/client/redact.rs index 8dbe47a6..a8eaf91d 100644 --- a/src/api/client/redact.rs +++ b/src/api/client/redact.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Result, matrix::pdu::PduBuilder}; +use conduwuit::{Err, Result, matrix::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::room::redaction::RoomRedactionEventContent, }; @@ -17,6 +17,10 @@ pub(crate) async fn redact_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; + if services.users.is_suspended(sender_user).await? { + // TODO: Users can redact their own messages while suspended + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; From a6ba9e3045e7cdaccbae006d0037b045c1348016 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:39:24 +0100 Subject: [PATCH 08/52] feat: Prevent suspended users changing their profile --- src/api/client/profile.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/api/client/profile.rs b/src/api/client/profile.rs index e2d1c934..bdba4078 100644 --- a/src/api/client/profile.rs +++ b/src/api/client/profile.rs @@ -36,6 +36,9 @@ pub(crate) async fn set_displayname_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); @@ -125,6 +128,9 @@ pub(crate) async fn set_avatar_url_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if *sender_user != body.user_id && body.appservice_info.is_none() { return Err!(Request(Forbidden("You cannot update the profile of another user"))); From a94128e6986f7f05909329de694e547543b4dc36 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:39:57 +0100 Subject: [PATCH 09/52] feat: Prevent suspended users joining/knocking on rooms --- src/api/client/membership.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index 145b3cde..d78ebdec 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -178,6 +178,9 @@ pub(crate) async fn join_room_by_id_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } banned_room_check( &services, @@ -249,6 +252,9 @@ pub(crate) async fn join_room_by_id_or_alias_route( let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let appservice_info = &body.appservice_info; let body = body.body; + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { | Ok(room_id) => { @@ -369,6 +375,9 @@ pub(crate) async fn knock_room_route( ) -> Result { let sender_user = body.sender_user(); let body = &body.body; + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { | Ok(room_id) => { @@ -492,6 +501,9 @@ pub(crate) async fn invite_user_route( body: Ruma, ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { debug_error!( From e127c4e5a2e08498c61df30b8424a01f30671bca Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:46:22 +0100 Subject: [PATCH 10/52] feat: Add un/suspend admin commands --- src/admin/user/commands.rs | 34 ++++++++++++++++++++++++++++++++++ src/admin/user/mod.rs | 22 ++++++++++++++++++++++ 2 files changed, 56 insertions(+) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index e5e481e5..297e4bb3 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -224,6 +224,40 @@ pub(super) async fn deactivate(&self, no_leave_rooms: bool, user_id: String) -> .await } +#[admin_command] +pub(super) async fn suspend(&self, user_id: String) -> Result { + let user_id = parse_local_user_id(self.services, &user_id)?; + + if user_id == self.services.globals.server_user { + return Err!("Not allowed to suspend the server service account.",); + } + + if !self.services.users.exists(&user_id).await { + return Err!("User {user_id} does not exist."); + } + self.services.users.suspend_account(&user_id).await; + + self.write_str(&format!("User {user_id} has been suspended.")) + .await +} + +#[admin_command] +pub(super) async fn unsuspend(&self, user_id: String) -> Result { + let user_id = parse_local_user_id(self.services, &user_id)?; + + if user_id == self.services.globals.server_user { + return Err!("Not allowed to unsuspend the server service account.",); + } + + if !self.services.users.exists(&user_id).await { + return Err!("User {user_id} does not exist."); + } + self.services.users.unsuspend_account(&user_id).await; + + self.write_str(&format!("User {user_id} has been unsuspended.")) + .await +} + #[admin_command] pub(super) async fn reset_password(&self, username: String, password: Option) -> Result { let user_id = parse_local_user_id(self.services, &username)?; diff --git a/src/admin/user/mod.rs b/src/admin/user/mod.rs index e789376a..645d3637 100644 --- a/src/admin/user/mod.rs +++ b/src/admin/user/mod.rs @@ -59,6 +59,28 @@ pub(super) enum UserCommand { force: bool, }, + /// - Suspend a user + /// + /// Suspended users are able to log in, sync, and read messages, but are not + /// able to send events nor redact them, cannot change their profile, and + /// are unable to join, invite to, or knock on rooms. + /// + /// Suspended users can still leave rooms and deactivate their account. + /// Suspending them effectively makes them read-only. + Suspend { + /// Username of the user to suspend + user_id: String, + }, + + /// - Unsuspend a user + /// + /// Reverses the effects of the `suspend` command, allowing the user to send + /// messages, change their profile, create room invites, etc. 
+ Unsuspend { + /// Username of the user to unsuspend + user_id: String, + }, + /// - List local users in the database #[clap(alias = "list")] ListUsers, From 5d5350a9fee3b4959ef66b0d18a588d2f2be7dde Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 20:47:02 +0100 Subject: [PATCH 11/52] feat: Prevent suspended users creating new rooms --- src/api/client/room/create.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/room/create.rs b/src/api/client/room/create.rs index be3fd23b..d1dffc51 100644 --- a/src/api/client/room/create.rs +++ b/src/api/client/room/create.rs @@ -70,6 +70,10 @@ pub(crate) async fn create_room_route( )); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let room_id: OwnedRoomId = match &body.room_id { | Some(custom_room_id) => custom_room_id_check(&services, custom_room_id)?, | _ => RoomId::new(&services.server.name), From 968c0e236c37d204d9718689dd79888cf32f54e5 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:19:10 +0100 Subject: [PATCH 12/52] fix: Create the column appropriately --- src/database/maps.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/database/maps.rs b/src/database/maps.rs index 19f9ced4..91ba2ebe 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -378,6 +378,10 @@ pub(super) static MAPS: &[Descriptor] = &[ name: "userid_password", ..descriptor::RANDOM }, + Descriptor { + name: "userid_suspended", + ..descriptor::RANDOM_SMALL + }, Descriptor { name: "userid_presenceid", ..descriptor::RANDOM_SMALL From 8791a9b851f8fc3cd827fe898406561fc4104287 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:19:37 +0100 Subject: [PATCH 13/52] fix: Inappropriate empty check I once again, assumed `true` is actually `false`. 
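(Editor's note, not part of the original patch: patch 05 stores the literal value "1" in `userid_suspended` for suspended accounts and writes nothing otherwise, so the corrected check is the mapping sketched below. A minimal illustrative sketch, not the actual implementation.)

    // A present, non-empty value means the account is suspended; the earlier
    // `val.is_empty()` had this inverted.
    fn value_means_suspended(value: &[u8]) -> bool {
        !value.is_empty()
    }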
--- src/service/users/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 1a9f6600..6d3b662f 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -177,7 +177,7 @@ impl Service { self.db .userid_suspended .get(user_id) - .map_ok(|val| val.is_empty()) + .map_ok(|val| !val.is_empty()) .map_err(|_| err!(Request(NotFound("User does not exist.")))) .await } From cc864dc8bbb299b7a48c6fb963a61b6979ceb278 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:20:56 +0100 Subject: [PATCH 14/52] feat: Do not allow suspending admin users --- src/admin/user/commands.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 297e4bb3..61f10a86 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -235,6 +235,9 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { if !self.services.users.exists(&user_id).await { return Err!("User {user_id} does not exist."); } + if self.services.users.is_admin(&user_id).await { + return Err!("Admin users cannot be suspended."); + } self.services.users.suspend_account(&user_id).await; self.write_str(&format!("User {user_id} has been suspended.")) From 1ff8af8e9e6ef723287dd9b174dc69844bf8631c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:24:20 +0100 Subject: [PATCH 15/52] style: Remove unneeded statements (clippy) --- src/service/users/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 6d3b662f..f6a98858 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -15,7 +15,6 @@ use ruma::{ AnyToDeviceEvent, GlobalAccountDataEventType, ignored_user_list::IgnoredUserListEvent, }, serde::Raw, - uint, }; use serde_json::json; @@ -147,12 +146,12 @@ impl Service { } /// Suspend account, placing it in a read-only state - pub async fn suspend_account(&self, user_id: &UserId) -> () { + pub async fn suspend_account(&self, user_id: &UserId) { self.db.userid_suspended.insert(user_id, "1"); } /// Unsuspend account, placing it in a read-write state - pub async fn unsuspend_account(&self, user_id: &UserId) -> () { + pub async fn unsuspend_account(&self, user_id: &UserId) { self.db.userid_suspended.remove(user_id); } From d0548ec0644ebb0e3538f65088db9b16ce0eb5a8 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 21:30:07 +0100 Subject: [PATCH 16/52] feat: Forbid suspended users from sending state events --- src/api/client/state.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 2ddc8f14..07802b1b 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -33,6 +33,10 @@ pub(crate) async fn send_state_event_for_key_route( ) -> Result { let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + Ok(send_state_event::v3::Response { event_id: send_state_event_for_key_helper( &services, From 90180916eb13cdcf4e81c668fa6255222c781f1d Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:42:31 +0100 Subject: [PATCH 17/52] feat: Prevent suspended users performing room changes Prevents kicks, bans, unbans, and alias modification --- src/api/client/alias.rs | 6 ++++++ src/api/client/directory.rs | 3 +++ src/api/client/membership.rs | 16 ++++++++++++++-- 3 files changed, 23 insertions(+), 2 deletions(-) diff --git a/src/api/client/alias.rs b/src/api/client/alias.rs index 9f1b05f8..dc7aad44 100644 --- a/src/api/client/alias.rs +++ b/src/api/client/alias.rs @@ -18,6 +18,9 @@ pub(crate) async fn create_alias_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } services .rooms @@ -63,6 +66,9 @@ pub(crate) async fn delete_alias_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } services .rooms diff --git a/src/api/client/directory.rs b/src/api/client/directory.rs index aa6ae168..2e219fd9 100644 --- a/src/api/client/directory.rs +++ b/src/api/client/directory.rs @@ -128,6 +128,9 @@ pub(crate) async fn set_room_visibility_route( // Return 404 if the room doesn't exist return Err!(Request(NotFound("Room not found"))); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } if services .users diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index d78ebdec..e6392533 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -578,6 +578,10 @@ pub(crate) async fn kick_user_route( State(services): State, body: Ruma, ) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let Ok(event) = services @@ -613,7 +617,7 @@ pub(crate) async fn kick_user_route( third_party_invite: None, ..event }), - body.sender_user(), + sender_user, &body.room_id, &state_lock, ) @@ -637,6 +641,10 @@ pub(crate) async fn ban_user_route( return Err!(Request(Forbidden("You cannot ban yourself."))); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let current_member_content = services @@ -679,6 +687,10 @@ pub(crate) async fn unban_user_route( State(services): State, body: Ruma, ) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; let current_member_content = services @@ -707,7 +719,7 @@ pub(crate) async fn unban_user_route( is_direct: None, ..current_member_content }), - body.sender_user(), + sender_user, &body.room_id, &state_lock, ) From 8e06571e7c7e88c14580c7e6206aecdb12a5eb44 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:42:49 +0100 Subject: [PATCH 18/52] feat: Prevent suspended users uploading media --- src/api/client/media.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/api/client/media.rs b/src/api/client/media.rs index 94572413..11d5450c 100644 --- a/src/api/client/media.rs +++ b/src/api/client/media.rs @@ -52,6 +52,9 @@ pub(crate) async fn create_content_route( body: Ruma, ) -> Result { let user = body.sender_user.as_ref().expect("user is authenticated"); + if services.users.is_suspended(user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } let filename = body.filename.as_deref(); let content_type = body.content_type.as_deref(); From 08527a28804f673817793af86ea94086fcba3a5c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:43:35 +0100 Subject: [PATCH 19/52] feat: Prevent suspended users upgrading rooms --- src/api/client/room/upgrade.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs index da5b49fe..d8f5ea83 100644 --- a/src/api/client/room/upgrade.rs +++ b/src/api/client/room/upgrade.rs @@ -2,7 +2,7 @@ use std::cmp::max; use axum::extract::State; use conduwuit::{ - Error, Result, err, info, + Err, Error, Result, err, info, matrix::{StateKey, pdu::PduBuilder}, }; use futures::StreamExt; @@ -63,6 +63,10 @@ pub(crate) async fn upgrade_room_route( )); } + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + // Create a replacement room let replacement_room = RoomId::new(services.globals.server_name()); From 1124097bd17eea3d10b35b9f05539321050b248a Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:52:20 +0100 Subject: [PATCH 20/52] feat: Only allow private read receipts when suspended --- src/api/client/read_marker.rs | 49 +++++++++++++++++++---------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/src/api/client/read_marker.rs b/src/api/client/read_marker.rs index fbfc8fea..e152869c 100644 --- a/src/api/client/read_marker.rs +++ b/src/api/client/read_marker.rs @@ -58,29 +58,34 @@ pub(crate) async fn set_read_marker_route( } if let Some(event) = &body.read_receipt { - let receipt_content = BTreeMap::from_iter([( - event.to_owned(), - BTreeMap::from_iter([( - ReceiptType::Read, - BTreeMap::from_iter([(sender_user.to_owned(), ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - })]), - )]), - )]); + if !services.users.is_suspended(sender_user).await? 
{ + let receipt_content = BTreeMap::from_iter([( + event.to_owned(), + BTreeMap::from_iter([( + ReceiptType::Read, + BTreeMap::from_iter([( + sender_user.to_owned(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + )]), + )]), + )]); - services - .rooms - .read_receipt - .readreceipt_update( - sender_user, - &body.room_id, - &ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - ) - .await; + services + .rooms + .read_receipt + .readreceipt_update( + sender_user, + &body.room_id, + &ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + ) + .await; + } } if let Some(event) = &body.private_read_receipt { From 72f8cb30384a32b1489b676fcfc76fc59f323c42 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 28 Jun 2025 22:53:25 +0100 Subject: [PATCH 21/52] feat: Do not allow suspended users to send typing statuses --- src/api/client/typing.rs | 67 ++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/src/api/client/typing.rs b/src/api/client/typing.rs index 1d8d02fd..7b0df538 100644 --- a/src/api/client/typing.rs +++ b/src/api/client/typing.rs @@ -26,41 +26,42 @@ pub(crate) async fn create_typing_event_route( { return Err!(Request(Forbidden("You are not in this room."))); } - - match body.state { - | Typing::Yes(duration) => { - let duration = utils::clamp( - duration.as_millis().try_into().unwrap_or(u64::MAX), + if !services.users.is_suspended(sender_user).await? { + match body.state { + | Typing::Yes(duration) => { + let duration = utils::clamp( + duration.as_millis().try_into().unwrap_or(u64::MAX), + services + .server + .config + .typing_client_timeout_min_s + .try_mul(1000)?, + services + .server + .config + .typing_client_timeout_max_s + .try_mul(1000)?, + ); services - .server - .config - .typing_client_timeout_min_s - .try_mul(1000)?, + .rooms + .typing + .typing_add( + sender_user, + &body.room_id, + utils::millis_since_unix_epoch() + .checked_add(duration) + .expect("user typing timeout should not get this high"), + ) + .await?; + }, + | _ => { services - .server - .config - .typing_client_timeout_max_s - .try_mul(1000)?, - ); - services - .rooms - .typing - .typing_add( - sender_user, - &body.room_id, - utils::millis_since_unix_epoch() - .checked_add(duration) - .expect("user typing timeout should not get this high"), - ) - .await?; - }, - | _ => { - services - .rooms - .typing - .typing_remove(sender_user, &body.room_id) - .await?; - }, + .rooms + .typing + .typing_remove(sender_user, &body.room_id) + .await?; + }, + } } // ping presence From eb2e3b3bb70af18ad0821d046fe51926977b5d0a Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 01:52:02 +0100 Subject: [PATCH 22/52] fix: Missing suspensions shouldn't error Turns out copying and pasting the function above verbatim actually introduces more problems than it solves! 
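(Editor's note, not part of the original patch: the behaviour this fix aims for can be summarised as the hedged sketch below; `users`, `ordinary_user` and `suspended_user` are hypothetical names, not identifiers from the codebase.)

    // An account with no `userid_suspended` row was never suspended, so the
    // lookup must resolve to `false` rather than bubbling up a not-found error.
    assert!(!users.is_suspended(&ordinary_user).await?);  // no row: not suspended
    assert!(users.is_suspended(&suspended_user).await?);  // row present: suspended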
--- src/service/users/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index f6a98858..d80a7e22 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,7 +2,7 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ Err, Error, Result, Server, at, debug_warn, err, trace, - utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, + utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; use futures::{Stream, StreamExt, TryFutureExt}; @@ -176,8 +176,7 @@ impl Service { self.db .userid_suspended .get(user_id) - .map_ok(|val| !val.is_empty()) - .map_err(|_| err!(Request(NotFound("User does not exist.")))) + .map_ok_or(Ok(false), |_| Ok(true)) .await } From d8a27eeb54b79a1126dd08c48f8f20d9cc0c0104 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 02:28:04 +0100 Subject: [PATCH 23/52] fix: Failing open on database errors oops --- src/service/users/mod.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index d80a7e22..5db7dc1d 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,11 +1,11 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - Err, Error, Result, Server, at, debug_warn, err, trace, + Err, Error, Result, Server, at, debug_warn, err, result, trace, utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt}; +use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; use ruma::{ DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, @@ -176,7 +176,19 @@ impl Service { self.db .userid_suspended .get(user_id) - .map_ok_or(Ok(false), |_| Ok(true)) + .map_ok_or_else( + |err| { + if err.is_not_found() { + Ok(false) + } else { + err!(Database(error!( + "Failed to check if user {user_id} is suspended: {err}" + ))); + Ok(true) + } + }, + |_| Ok(true), + ) .await } From 13e17d52e02f33847bed089d4451d248694be854 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 02:30:52 +0100 Subject: [PATCH 24/52] style: Remove unnecessary imports (clippy) --- src/service/users/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 5db7dc1d..b2a42959 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,11 +1,11 @@ use std::{collections::BTreeMap, mem, sync::Arc}; use conduwuit::{ - Err, Error, Result, Server, at, debug_warn, err, result, trace, - utils::{self, ReadyExt, TryFutureExtExt, stream::TryIgnore, string::Unquoted}, + Err, Error, Result, Server, at, debug_warn, err, trace, + utils::{self, ReadyExt, stream::TryIgnore, string::Unquoted}, }; use database::{Deserialized, Ignore, Interfix, Json, Map}; -use futures::{Stream, StreamExt, TryFutureExt, TryStreamExt}; +use futures::{Stream, StreamExt, TryFutureExt}; use ruma::{ DeviceId, KeyId, MilliSecondsSinceUnixEpoch, OneTimeKeyAlgorithm, OneTimeKeyId, OneTimeKeyName, OwnedDeviceId, OwnedKeyId, OwnedMxcUri, OwnedUserId, RoomId, UInt, UserId, From ecc6fda98b135e9e5fdb9cd93124baac7a4fc001 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 15:07:04 
+0100 Subject: [PATCH 25/52] feat: Record metadata about user suspensions --- src/admin/user/commands.rs | 6 +++- src/database/maps.rs | 2 +- src/service/users/mod.rs | 56 ++++++++++++++++++++++++-------------- 3 files changed, 42 insertions(+), 22 deletions(-) diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index 61f10a86..ad2d1c78 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -238,7 +238,11 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { if self.services.users.is_admin(&user_id).await { return Err!("Admin users cannot be suspended."); } - self.services.users.suspend_account(&user_id).await; + // TODO: Record the actual user that sent the suspension where possible + self.services + .users + .suspend_account(&user_id, self.services.globals.server_user.as_ref()) + .await; self.write_str(&format!("User {user_id} has been suspended.")) .await diff --git a/src/database/maps.rs b/src/database/maps.rs index 91ba2ebe..214dbf34 100644 --- a/src/database/maps.rs +++ b/src/database/maps.rs @@ -379,7 +379,7 @@ pub(super) static MAPS: &[Descriptor] = &[ ..descriptor::RANDOM }, Descriptor { - name: "userid_suspended", + name: "userid_suspension", ..descriptor::RANDOM_SMALL }, Descriptor { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b2a42959..d2dfccd9 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -16,10 +16,21 @@ use ruma::{ }, serde::Raw, }; +use serde::{Deserialize, Serialize}; use serde_json::json; use crate::{Dep, account_data, admin, globals, rooms}; +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct UserSuspension { + /// Whether the user is currently suspended + pub suspended: bool, + /// When the user was suspended (Unix timestamp in milliseconds) + pub suspended_at: u64, + /// User ID of who suspended this user + pub suspended_by: String, +} + pub struct Service { services: Services, db: Data, @@ -52,7 +63,7 @@ struct Data { userid_lastonetimekeyupdate: Arc, userid_masterkeyid: Arc, userid_password: Arc, - userid_suspended: Arc, + userid_suspension: Arc, userid_selfsigningkeyid: Arc, userid_usersigningkeyid: Arc, useridprofilekey_value: Arc, @@ -88,7 +99,7 @@ impl crate::Service for Service { userid_lastonetimekeyupdate: args.db["userid_lastonetimekeyupdate"].clone(), userid_masterkeyid: args.db["userid_masterkeyid"].clone(), userid_password: args.db["userid_password"].clone(), - userid_suspended: args.db["userid_suspended"].clone(), + userid_suspension: args.db["userid_suspension"].clone(), userid_selfsigningkeyid: args.db["userid_selfsigningkeyid"].clone(), userid_usersigningkeyid: args.db["userid_usersigningkeyid"].clone(), useridprofilekey_value: args.db["useridprofilekey_value"].clone(), @@ -146,13 +157,20 @@ impl Service { } /// Suspend account, placing it in a read-only state - pub async fn suspend_account(&self, user_id: &UserId) { - self.db.userid_suspended.insert(user_id, "1"); + pub async fn suspend_account(&self, user_id: &UserId, suspending_user: &UserId) { + self.db.userid_suspension.raw_put( + user_id, + Json(UserSuspension { + suspended: true, + suspended_at: MilliSecondsSinceUnixEpoch::now().get().into(), + suspended_by: suspending_user.to_string(), + }), + ); } /// Unsuspend account, placing it in a read-write state pub async fn unsuspend_account(&self, user_id: &UserId) { - self.db.userid_suspended.remove(user_id); + self.db.userid_suspension.remove(user_id); } /// Check if a user has an account on this homeserver. 
@@ -173,23 +191,21 @@ impl Service { /// Check if account is suspended pub async fn is_suspended(&self, user_id: &UserId) -> Result { - self.db - .userid_suspended + match self + .db + .userid_suspension .get(user_id) - .map_ok_or_else( - |err| { - if err.is_not_found() { - Ok(false) - } else { - err!(Database(error!( - "Failed to check if user {user_id} is suspended: {err}" - ))); - Ok(true) - } - }, - |_| Ok(true), - ) .await + .deserialized::() + { + | Ok(s) => Ok(s.suspended), + | Err(e) => + if e.is_not_found() { + Ok(false) + } else { + Err(e) + }, + } } /// Check if account is active, infallible From acb74faa070801c4ee48cc76f6d3dd19174876b9 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 15:17:27 +0100 Subject: [PATCH 26/52] feat: Pass sender through admin commands --- src/admin/context.rs | 15 ++++++++++++++- src/admin/processor.rs | 1 + src/admin/user/commands.rs | 2 +- src/service/admin/mod.rs | 23 ++++++++++++++++++++--- src/service/rooms/timeline/mod.rs | 8 +++++--- 5 files changed, 41 insertions(+), 8 deletions(-) diff --git a/src/admin/context.rs b/src/admin/context.rs index 270537be..b453d88f 100644 --- a/src/admin/context.rs +++ b/src/admin/context.rs @@ -7,13 +7,14 @@ use futures::{ io::{AsyncWriteExt, BufWriter}, lock::Mutex, }; -use ruma::EventId; +use ruma::{EventId, UserId}; pub(crate) struct Context<'a> { pub(crate) services: &'a Services, pub(crate) body: &'a [&'a str], pub(crate) timer: SystemTime, pub(crate) reply_id: Option<&'a EventId>, + pub(crate) sender: Option<&'a UserId>, pub(crate) output: Mutex>>, } @@ -36,4 +37,16 @@ impl Context<'_> { output.write_all(s.as_bytes()).map_err(Into::into).await }) } + + /// Get the sender of the admin command, if available + pub(crate) fn sender(&self) -> Option<&UserId> { self.sender } + + /// Check if the command has sender information + pub(crate) fn has_sender(&self) -> bool { self.sender.is_some() } + + /// Get the sender as a string, or service user ID if not available + pub(crate) fn sender_or_service_user(&self) -> &UserId { + self.sender + .unwrap_or_else(|| self.services.globals.server_user.as_ref()) + } } diff --git a/src/admin/processor.rs b/src/admin/processor.rs index f7b7140f..8d1fe89c 100644 --- a/src/admin/processor.rs +++ b/src/admin/processor.rs @@ -63,6 +63,7 @@ async fn process_command(services: Arc, input: &CommandInput) -> Proce body: &body, timer: SystemTime::now(), reply_id: input.reply_id.as_deref(), + sender: input.sender.as_deref(), output: BufWriter::new(Vec::new()).into(), }; diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index ad2d1c78..d094fc5f 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -241,7 +241,7 @@ pub(super) async fn suspend(&self, user_id: String) -> Result { // TODO: Record the actual user that sent the suspension where possible self.services .users - .suspend_account(&user_id, self.services.globals.server_user.as_ref()) + .suspend_account(&user_id, self.sender_or_service_user()) .await; self.write_str(&format!("User {user_id} has been suspended.")) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 683f5400..86e12c3c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -45,11 +45,13 @@ struct Services { services: StdRwLock>>, } -/// Inputs to a command are a multi-line string and optional reply_id. +/// Inputs to a command are a multi-line string, optional reply_id, and optional +/// sender. 
#[derive(Debug)] pub struct CommandInput { pub command: String, pub reply_id: Option, + pub sender: Option>, } /// Prototype of the tab-completer. The input is buffered text when tab @@ -162,7 +164,22 @@ impl Service { pub fn command(&self, command: String, reply_id: Option) -> Result<()> { self.channel .0 - .send(CommandInput { command, reply_id }) + .send(CommandInput { command, reply_id, sender: None }) + .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) + } + + /// Posts a command to the command processor queue with sender information + /// and returns. Processing will take place on the service worker's task + /// asynchronously. Errors if the queue is full. + pub fn command_with_sender( + &self, + command: String, + reply_id: Option, + sender: Box, + ) -> Result<()> { + self.channel + .0 + .send(CommandInput { command, reply_id, sender: Some(sender) }) .map_err(|e| err!("Failed to enqueue admin command: {e:?}")) } @@ -173,7 +190,7 @@ impl Service { command: String, reply_id: Option, ) -> ProcessorResult { - self.process_command(CommandInput { command, reply_id }) + self.process_command(CommandInput { command, reply_id, sender: None }) .await } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 37963246..534d8faf 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -536,9 +536,11 @@ impl Service { self.services.search.index_pdu(shortroomid, &pdu_id, &body); if self.services.admin.is_admin_command(pdu, &body).await { - self.services - .admin - .command(body, Some((*pdu.event_id).into()))?; + self.services.admin.command_with_sender( + body, + Some((*pdu.event_id).into()), + pdu.sender.clone().into(), + )?; } } }, From d4862b8ead02a6ef61580ddefb24febb56c108b4 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sun, 29 Jun 2025 16:26:04 +0100 Subject: [PATCH 27/52] style: Remove redundant, unused functions --- src/admin/context.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/src/admin/context.rs b/src/admin/context.rs index b453d88f..3d3cffb7 100644 --- a/src/admin/context.rs +++ b/src/admin/context.rs @@ -38,12 +38,6 @@ impl Context<'_> { }) } - /// Get the sender of the admin command, if available - pub(crate) fn sender(&self) -> Option<&UserId> { self.sender } - - /// Check if the command has sender information - pub(crate) fn has_sender(&self) -> bool { self.sender.is_some() } - /// Get the sender as a string, or service user ID if not available pub(crate) fn sender_or_service_user(&self) -> &UserId { self.sender From ec9d3d613e3b3be018ac73c791c956ae44cc6611 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 23:02:15 +0100 Subject: [PATCH 28/52] chore: Add funding --- .github/FUNDING.yml | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..321dc74c --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,3 @@ +github: [JadedBlueEyes] +# Doesn't support an array, so we can only list nex +ko_fi: nexy7574 From 17930708d86dd73c5d39dbe758d489621b2626e5 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sun, 29 Jun 2025 23:06:26 +0100 Subject: [PATCH 29/52] chore: Add second ko-fi as custom link --- .github/FUNDING.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 321dc74c..fcfaade5 100644 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1,3 +1,5 @@ github: [JadedBlueEyes] # Doesn't support an array, so we can only list 
nex ko_fi: nexy7574 +custom: + - https://ko-fi.com/JadedBlueEyes From 57ad8c7fef8b7c34451fb975e8a526e0290a8e10 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Tue, 3 Jun 2025 21:31:02 +0100 Subject: [PATCH 30/52] refactor: Promote handling unsigned data out of timeline Also fixes: - Transaction IDs leaking in event route - Age not being set for event relations or threads - Both of the above for search results Notes down concern with relations table --- src/admin/query/room_timeline.rs | 4 +-- src/api/client/context.rs | 16 ++++++++-- src/api/client/message.rs | 9 ++++-- src/api/client/relations.rs | 4 +++ src/api/client/room/event.rs | 2 +- src/api/client/room/initial_sync.rs | 9 +++++- src/api/client/sync/mod.rs | 13 ++++---- src/api/client/sync/v3.rs | 2 +- src/api/client/threads.rs | 3 ++ src/api/server/backfill.rs | 12 +++++++- src/core/matrix/pdu/unsigned.rs | 17 +++++++++-- src/service/rooms/pdu_metadata/data.rs | 5 +--- src/service/rooms/search/mod.rs | 7 ++++- src/service/rooms/threads/mod.rs | 4 +-- src/service/rooms/timeline/data.rs | 41 +++++++------------------- src/service/rooms/timeline/mod.rs | 26 +++++++--------- src/service/sending/sender.rs | 2 +- 17 files changed, 104 insertions(+), 72 deletions(-) diff --git a/src/admin/query/room_timeline.rs b/src/admin/query/room_timeline.rs index 0fd22ca7..58f75cb9 100644 --- a/src/admin/query/room_timeline.rs +++ b/src/admin/query/room_timeline.rs @@ -31,7 +31,7 @@ pub(super) async fn last(&self, room_id: OwnedRoomOrAliasId) -> Result { .services .rooms .timeline - .last_timeline_count(None, &room_id) + .last_timeline_count(&room_id) .await?; self.write_str(&format!("{result:#?}")).await @@ -52,7 +52,7 @@ pub(super) async fn pdus( .services .rooms .timeline - .pdus_rev(None, &room_id, from) + .pdus_rev(&room_id, from) .try_take(limit.unwrap_or(3)) .try_collect() .await?; diff --git a/src/api/client/context.rs b/src/api/client/context.rs index dbc2a22f..8c9ba64c 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -84,11 +84,18 @@ pub(crate) async fn get_context_route( let base_event = ignored_filter(&services, (base_count, base_pdu), sender_user); + // PDUs are used to get seen user IDs and then returned in response. 
+ let events_before = services .rooms .timeline - .pdus_rev(Some(sender_user), room_id, Some(base_count)) + .pdus_rev(room_id, Some(base_count)) .ignore_err() + .then(async |mut pdu| { + pdu.1.set_unsigned(Some(sender_user)); + // TODO: bundled aggregations + pdu + }) .ready_filter_map(|item| event_filter(item, filter)) .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) @@ -98,8 +105,13 @@ pub(crate) async fn get_context_route( let events_after = services .rooms .timeline - .pdus(Some(sender_user), room_id, Some(base_count)) + .pdus(room_id, Some(base_count)) .ignore_err() + .then(async |mut pdu| { + pdu.1.set_unsigned(Some(sender_user)); + // TODO: bundled aggregations + pdu + }) .ready_filter_map(|item| event_filter(item, filter)) .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index e442850b..cd6b0a60 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -114,14 +114,14 @@ pub(crate) async fn get_message_events_route( | Direction::Forward => services .rooms .timeline - .pdus(Some(sender_user), room_id, Some(from)) + .pdus(room_id, Some(from)) .ignore_err() .boxed(), | Direction::Backward => services .rooms .timeline - .pdus_rev(Some(sender_user), room_id, Some(from)) + .pdus_rev(room_id, Some(from)) .ignore_err() .boxed(), }; @@ -132,6 +132,11 @@ pub(crate) async fn get_message_events_route( .wide_filter_map(|item| ignored_filter(&services, item, sender_user)) .wide_filter_map(|item| visibility_filter(&services, item, sender_user)) .take(limit) + .then(async |mut pdu| { + pdu.1.set_unsigned(Some(sender_user)); + // TODO: bundled aggregations + pdu + }) .collect() .await; diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index b8c2dd4d..5e323756 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -172,6 +172,10 @@ async fn paginate_relations_with_filter( }) } +// TODO: Can we move the visibility filter lower down, to avoid checking events +// that won't be sent? At the moment this also results in getting events that +// appear to have no relation because intermediaries are not visible to the +// user. 
async fn visibility_filter( services: &Services, sender_user: &UserId, diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index 2b115b5c..ec673d75 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -38,7 +38,7 @@ pub(crate) async fn get_room_event_route( "Fetched PDU must match requested" ); - event.add_age().ok(); + event.set_unsigned(body.sender_user.as_deref()); Ok(get_room_event::v3::Response { event: event.into_room_event() }) } diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index ca63610b..765d2a39 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -25,12 +25,19 @@ pub(crate) async fn room_initial_sync_route( return Err!(Request(Forbidden("No room preview available."))); } + // Events are returned in body + let limit = LIMIT_MAX; let events: Vec<_> = services .rooms .timeline - .pdus_rev(None, room_id, None) + .pdus_rev(room_id, None) .try_take(limit) + .and_then(async |mut pdu| { + pdu.1.set_unsigned(body.sender_user.as_deref()); + // TODO: bundled aggregations + Ok(pdu) + }) .try_collect() .await?; diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index 40370160..db581bd6 100644 --- a/src/api/client/sync/mod.rs +++ b/src/api/client/sync/mod.rs @@ -31,11 +31,7 @@ async fn load_timeline( next_batch: Option, limit: usize, ) -> Result<(Vec<(PduCount, PduEvent)>, bool), Error> { - let last_timeline_count = services - .rooms - .timeline - .last_timeline_count(Some(sender_user), room_id) - .await?; + let last_timeline_count = services.rooms.timeline.last_timeline_count(room_id).await?; if last_timeline_count <= roomsincecount { return Ok((Vec::new(), false)); @@ -44,8 +40,13 @@ async fn load_timeline( let non_timeline_pdus = services .rooms .timeline - .pdus_rev(Some(sender_user), room_id, None) + .pdus_rev(room_id, None) .ignore_err() + .map(move |mut pdu| { + pdu.1.set_unsigned(Some(sender_user)); + // TODO: bundled aggregations + pdu + }) .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) .ready_take_while(|&(pducount, _)| pducount > roomsincecount); diff --git a/src/api/client/sync/v3.rs b/src/api/client/sync/v3.rs index 7bc74c95..7eb8c7e1 100644 --- a/src/api/client/sync/v3.rs +++ b/src/api/client/sync/v3.rs @@ -1189,7 +1189,7 @@ async fn calculate_heroes( services .rooms .timeline - .all_pdus(sender_user, room_id) + .all_pdus(room_id) .ready_filter(|(_, pdu)| pdu.kind == RoomMember) .fold_default(|heroes: Vec<_>, (_, pdu)| { fold_hero(heroes, services, room_id, sender_user, pdu) diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 5b838bef..404da4f8 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -28,6 +28,9 @@ pub(crate) async fn get_threads_route( .transpose()? .unwrap_or_else(PduCount::max); + // TODO: bundled aggregation + // TODO: user_can_see_event and set_unsigned should be at the same level / + // function, so unsigned is only set for seen events. 
let threads: Vec<(PduCount, PduEvent)> = services .rooms .threads diff --git a/src/api/server/backfill.rs b/src/api/server/backfill.rs index 3cfbcedc..058fc273 100644 --- a/src/api/server/backfill.rs +++ b/src/api/server/backfill.rs @@ -3,6 +3,7 @@ use std::cmp; use axum::extract::State; use conduwuit::{ PduCount, Result, + result::LogErr, utils::{IterStream, ReadyExt, stream::TryTools}, }; use futures::{FutureExt, StreamExt, TryStreamExt}; @@ -62,7 +63,7 @@ pub(crate) async fn get_backfill_route( pdus: services .rooms .timeline - .pdus_rev(None, &body.room_id, Some(from.saturating_add(1))) + .pdus_rev(&body.room_id, Some(from.saturating_add(1))) .try_take(limit) .try_filter_map(|(_, pdu)| async move { Ok(services @@ -72,6 +73,15 @@ pub(crate) async fn get_backfill_route( .await .then_some(pdu)) }) + .and_then(async |mut pdu| { + // Strip the transaction ID, as that is private + pdu.remove_transaction_id().log_err().ok(); + // Add age, as this is specified + pdu.add_age().log_err().ok(); + // It's not clear if we should strip or add any more data, leave as is. + // In particular: Redaction? + Ok(pdu) + }) .try_filter_map(|pdu| async move { Ok(services .rooms diff --git a/src/core/matrix/pdu/unsigned.rs b/src/core/matrix/pdu/unsigned.rs index 23897519..2726a292 100644 --- a/src/core/matrix/pdu/unsigned.rs +++ b/src/core/matrix/pdu/unsigned.rs @@ -1,11 +1,24 @@ -use std::collections::BTreeMap; +use std::{borrow::Borrow, collections::BTreeMap}; use ruma::MilliSecondsSinceUnixEpoch; use serde::Deserialize; use serde_json::value::{RawValue as RawJsonValue, Value as JsonValue, to_raw_value}; use super::Pdu; -use crate::{Result, err, implement, is_true}; +use crate::{Result, err, implement, is_true, result::LogErr}; + +/// Set the `unsigned` field of the PDU using only information in the PDU. +/// Some unsigned data is already set within the database (eg. prev events, +/// threads). Once this is done, other data must be calculated from the database +/// (eg. relations) This is for server-to-client events. +/// Backfill handles this itself. 
+#[implement(Pdu)] +pub fn set_unsigned(&mut self, user_id: Option<&ruma::UserId>) { + if Some(self.sender.borrow()) != user_id { + self.remove_transaction_id().log_err().ok(); + } + self.add_age().log_err().ok(); +} #[implement(Pdu)] pub fn remove_transaction_id(&mut self) -> Result { diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index f0beab5a..c4b37b99 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -3,7 +3,6 @@ use std::{mem::size_of, sync::Arc}; use conduwuit::{ PduCount, PduEvent, arrayvec::ArrayVec, - result::LogErr, utils::{ ReadyExt, stream::{TryIgnore, WidebandExt}, @@ -80,9 +79,7 @@ impl Data { let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; - if pdu.sender != user_id { - pdu.remove_transaction_id().log_err().ok(); - } + pdu.set_unsigned(Some(user_id)); Some((shorteventid, pdu)) }) diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 4100dd75..7cef5dbf 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -127,7 +127,12 @@ pub async fn search_pdus<'a>( .then_some(pdu) }) .skip(query.skip) - .take(query.limit); + .take(query.limit) + .map(move |mut pdu| { + pdu.set_unsigned(query.user_id); + // TODO: bundled aggregation + pdu + }); Ok((count, pdus)) } diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs index a680df55..a24183e6 100644 --- a/src/service/rooms/threads/mod.rs +++ b/src/service/rooms/threads/mod.rs @@ -160,9 +160,7 @@ impl Service { let mut pdu = self.services.timeline.get_pdu_from_id(&pdu_id).await.ok()?; let pdu_id: PduId = pdu_id.into(); - if pdu.sender != user_id { - pdu.remove_transaction_id().ok(); - } + pdu.set_unsigned(Some(user_id)); Some((pdu_id.shorteventid, pdu)) }); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 94c78bb0..5f7b8c81 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,14 +1,11 @@ -use std::{borrow::Borrow, sync::Arc}; +use std::sync::Arc; use conduwuit::{ - Err, PduCount, PduEvent, Result, at, err, - result::{LogErr, NotFound}, - utils, - utils::stream::TryReadyExt, + Err, PduCount, PduEvent, Result, at, err, result::NotFound, utils, utils::stream::TryReadyExt, }; use database::{Database, Deserialized, Json, KeyVal, Map}; use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut}; -use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, api::Direction}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, api::Direction}; use super::{PduId, RawPduId}; use crate::{Dep, rooms, rooms::short::ShortRoomId}; @@ -46,12 +43,8 @@ impl Data { } #[inline] - pub(super) async fn last_timeline_count( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); + pub(super) async fn last_timeline_count(&self, room_id: &RoomId) -> Result { + let pdus_rev = self.pdus_rev(room_id, PduCount::max()); pin_mut!(pdus_rev); let last_count = pdus_rev @@ -65,12 +58,8 @@ impl Data { } #[inline] - pub(super) async fn latest_pdu_in_room( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - let pdus_rev = self.pdus_rev(sender_user, room_id, PduCount::max()); + pub(super) async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { + let pdus_rev = self.pdus_rev(room_id, PduCount::max()); 
pin_mut!(pdus_rev); pdus_rev @@ -223,7 +212,6 @@ impl Data { /// order. pub(super) fn pdus_rev<'a>( &'a self, - user_id: Option<&'a UserId>, room_id: &'a RoomId, until: PduCount, ) -> impl Stream> + Send + 'a { @@ -233,14 +221,13 @@ impl Data { self.pduid_pdu .rev_raw_stream_from(¤t) .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) - .ready_and_then(move |item| Self::each_pdu(item, user_id)) + .ready_and_then(Self::from_json_slice) }) .try_flatten_stream() } pub(super) fn pdus<'a>( &'a self, - user_id: Option<&'a UserId>, room_id: &'a RoomId, from: PduCount, ) -> impl Stream> + Send + 'a { @@ -250,21 +237,15 @@ impl Data { self.pduid_pdu .raw_stream_from(¤t) .ready_try_take_while(move |(key, _)| Ok(key.starts_with(&prefix))) - .ready_and_then(move |item| Self::each_pdu(item, user_id)) + .ready_and_then(Self::from_json_slice) }) .try_flatten_stream() } - fn each_pdu((pdu_id, pdu): KeyVal<'_>, user_id: Option<&UserId>) -> Result { + fn from_json_slice((pdu_id, pdu): KeyVal<'_>) -> Result { let pdu_id: RawPduId = pdu_id.into(); - let mut pdu = serde_json::from_slice::(pdu)?; - - if Some(pdu.sender.borrow()) != user_id { - pdu.remove_transaction_id().log_err().ok(); - } - - pdu.add_age().log_err().ok(); + let pdu = serde_json::from_slice::(pdu)?; Ok((pdu_id.pdu_count(), pdu)) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 534d8faf..ce99a829 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -165,7 +165,7 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, PduEvent)> { - let pdus = self.pdus(None, room_id, None); + let pdus = self.pdus(room_id, None); pin_mut!(pdus); pdus.try_next() @@ -175,16 +175,12 @@ impl Service { #[tracing::instrument(skip(self), level = "debug")] pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { - self.db.latest_pdu_in_room(None, room_id).await + self.db.latest_pdu_in_room(room_id).await } #[tracing::instrument(skip(self), level = "debug")] - pub async fn last_timeline_count( - &self, - sender_user: Option<&UserId>, - room_id: &RoomId, - ) -> Result { - self.db.last_timeline_count(sender_user, room_id).await + pub async fn last_timeline_count(&self, room_id: &RoomId) -> Result { + self.db.last_timeline_count(room_id).await } /// Returns the `count` of this pdu's id. @@ -547,6 +543,10 @@ impl Service { | _ => {}, } + // CONCERN: If we receive events with a relation out-of-order, we never write + // their relation / thread. We need some kind of way to trigger when we receive + // this event, and potentially a way to rebuild the table entirely. + if let Ok(content) = pdu.get_content::() { if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { self.services @@ -1010,34 +1010,30 @@ impl Service { #[inline] pub fn all_pdus<'a>( &'a self, - user_id: &'a UserId, room_id: &'a RoomId, ) -> impl Stream + Send + 'a { - self.pdus(Some(user_id), room_id, None).ignore_err() + self.pdus(room_id, None).ignore_err() } /// Reverse iteration starting at from. #[tracing::instrument(skip(self), level = "debug")] pub fn pdus_rev<'a>( &'a self, - user_id: Option<&'a UserId>, room_id: &'a RoomId, until: Option, ) -> impl Stream> + Send + 'a { self.db - .pdus_rev(user_id, room_id, until.unwrap_or_else(PduCount::max)) + .pdus_rev(room_id, until.unwrap_or_else(PduCount::max)) } /// Forward iteration starting at from. 
#[tracing::instrument(skip(self), level = "debug")] pub fn pdus<'a>( &'a self, - user_id: Option<&'a UserId>, room_id: &'a RoomId, from: Option, ) -> impl Stream> + Send + 'a { - self.db - .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) + self.db.pdus(room_id, from.unwrap_or_else(PduCount::min)) } /// Replace a PDU with the redacted form. diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs index fab02f6b..cd84f7e7 100644 --- a/src/service/sending/sender.rs +++ b/src/service/sending/sender.rs @@ -781,7 +781,7 @@ impl Service { for pdu in pdus { // Redacted events are not notification targets (we don't send push for them) - if pdu.contains_unsigned_property("redacted_because", serde_json::Value::is_string) { + if pdu.is_redacted() { continue; } From e1127c5d702d6aacf1e675617735101ccf5f36b1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 4 Jun 2025 00:11:09 +0100 Subject: [PATCH 31/52] feat: Add bundled aggregations support Add support for the m.replace and m.reference bundled aggregations. This should fix plenty of subtle client issues. Threads are not included in the new code as they have historically been written to the database. Replacing the old system would result in issues when switching away from continuwuity, so saved for later. Some TODOs have been left re event visibility and ignored users. These should be OK for now, though. --- src/api/client/context.rs | 18 +- src/api/client/message.rs | 11 +- src/api/client/relations.rs | 13 +- src/api/client/room/event.rs | 11 +- src/api/client/room/initial_sync.rs | 13 +- src/api/client/search.rs | 13 +- src/api/client/sync/mod.rs | 18 +- src/api/client/threads.rs | 14 +- .../pdu_metadata/bundled_aggregations.rs | 394 ++++++++++++++++++ src/service/rooms/pdu_metadata/mod.rs | 1 + 10 files changed, 491 insertions(+), 15 deletions(-) create mode 100644 src/service/rooms/pdu_metadata/bundled_aggregations.rs diff --git a/src/api/client/context.rs b/src/api/client/context.rs index 8c9ba64c..ee3a458c 100644 --- a/src/api/client/context.rs +++ b/src/api/client/context.rs @@ -93,7 +93,14 @@ pub(crate) async fn get_context_route( .ignore_err() .then(async |mut pdu| { pdu.1.set_unsigned(Some(sender_user)); - // TODO: bundled aggregations + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + debug_warn!("Failed to add bundled aggregations: {e}"); + } pdu }) .ready_filter_map(|item| event_filter(item, filter)) @@ -109,7 +116,14 @@ pub(crate) async fn get_context_route( .ignore_err() .then(async |mut pdu| { pdu.1.set_unsigned(Some(sender_user)); - // TODO: bundled aggregations + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + debug_warn!("Failed to add bundled aggregations: {e}"); + } pdu }) .ready_filter_map(|item| event_filter(item, filter)) diff --git a/src/api/client/message.rs b/src/api/client/message.rs index cd6b0a60..6087478c 100644 --- a/src/api/client/message.rs +++ b/src/api/client/message.rs @@ -2,7 +2,7 @@ use core::panic; use axum::extract::State; use conduwuit::{ - Err, Result, at, + Err, Result, at, debug_warn, matrix::{ Event, pdu::{PduCount, PduEvent}, @@ -134,7 +134,14 @@ pub(crate) async fn get_message_events_route( .take(limit) .then(async |mut pdu| { pdu.1.set_unsigned(Some(sender_user)); - // TODO: bundled aggregations + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + 
debug_warn!("Failed to add bundled aggregations: {e}"); + } pdu }) .collect() diff --git a/src/api/client/relations.rs b/src/api/client/relations.rs index 5e323756..377f0c71 100644 --- a/src/api/client/relations.rs +++ b/src/api/client/relations.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Result, at, + Result, at, debug_warn, matrix::pdu::PduCount, utils::{IterStream, ReadyExt, result::FlatOk, stream::WidebandExt}, }; @@ -149,6 +149,17 @@ async fn paginate_relations_with_filter( .ready_take_while(|(count, _)| Some(*count) != to) .wide_filter_map(|item| visibility_filter(services, sender_user, item)) .take(limit) + .then(async |mut pdu| { + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + debug_warn!("Failed to add bundled aggregations to relation: {e}"); + } + pdu + }) .collect() .await; diff --git a/src/api/client/room/event.rs b/src/api/client/room/event.rs index ec673d75..e7b0bb37 100644 --- a/src/api/client/room/event.rs +++ b/src/api/client/room/event.rs @@ -1,5 +1,5 @@ use axum::extract::State; -use conduwuit::{Err, Event, Result, err}; +use conduwuit::{Err, Event, Result, debug_warn, err}; use futures::{FutureExt, TryFutureExt, future::try_join}; use ruma::api::client::room::get_room_event; @@ -38,6 +38,15 @@ pub(crate) async fn get_room_event_route( "Fetched PDU must match requested" ); + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(body.sender_user(), &mut event) + .await + { + debug_warn!("Failed to add bundled aggregations to event: {e}"); + } + event.set_unsigned(body.sender_user.as_deref()); Ok(get_room_event::v3::Response { event: event.into_room_event() }) diff --git a/src/api/client/room/initial_sync.rs b/src/api/client/room/initial_sync.rs index 765d2a39..2f965245 100644 --- a/src/api/client/room/initial_sync.rs +++ b/src/api/client/room/initial_sync.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Err, PduEvent, Result, at, + Err, PduEvent, Result, at, debug_warn, utils::{BoolExt, stream::TryTools}, }; use futures::TryStreamExt; @@ -35,7 +35,16 @@ pub(crate) async fn room_initial_sync_route( .try_take(limit) .and_then(async |mut pdu| { pdu.1.set_unsigned(body.sender_user.as_deref()); - // TODO: bundled aggregations + if let Some(sender_user) = body.sender_user.as_deref() { + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + debug_warn!("Failed to add bundled aggregations: {e}"); + } + } Ok(pdu) }) .try_collect() diff --git a/src/api/client/search.rs b/src/api/client/search.rs index d4dcde57..af5fccec 100644 --- a/src/api/client/search.rs +++ b/src/api/client/search.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use axum::extract::State; use conduwuit::{ - Err, Result, at, is_true, + Err, Result, at, debug_warn, is_true, matrix::pdu::PduEvent, result::FlatOk, utils::{IterStream, stream::ReadyExt}, @@ -144,6 +144,17 @@ async fn category_room_events( .map(at!(2)) .flatten() .stream() + .then(|mut pdu| async { + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu) + .await + { + debug_warn!("Failed to add bundled aggregations to search result: {e}"); + } + pdu + }) .map(PduEvent::into_room_event) .map(|result| SearchResult { rank: None, diff --git a/src/api/client/sync/mod.rs b/src/api/client/sync/mod.rs index db581bd6..1ea62883 100644 --- a/src/api/client/sync/mod.rs +++ 
b/src/api/client/sync/mod.rs @@ -3,7 +3,7 @@ mod v4; mod v5; use conduwuit::{ - Error, PduCount, Result, + Error, PduCount, Result, debug_warn, matrix::pdu::PduEvent, utils::stream::{BroadbandExt, ReadyExt, TryIgnore}, }; @@ -42,13 +42,23 @@ async fn load_timeline( .timeline .pdus_rev(room_id, None) .ignore_err() + .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) + .ready_take_while(|&(pducount, _)| pducount > roomsincecount) .map(move |mut pdu| { pdu.1.set_unsigned(Some(sender_user)); - // TODO: bundled aggregations pdu }) - .ready_skip_while(|&(pducount, _)| pducount > next_batch.unwrap_or_else(PduCount::max)) - .ready_take_while(|&(pducount, _)| pducount > roomsincecount); + .then(async move |mut pdu| { + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(sender_user, &mut pdu.1) + .await + { + debug_warn!("Failed to add bundled aggregations: {e}"); + } + pdu + }); // Take the last events for the timeline pin_mut!(non_timeline_pdus); diff --git a/src/api/client/threads.rs b/src/api/client/threads.rs index 404da4f8..09fb75d6 100644 --- a/src/api/client/threads.rs +++ b/src/api/client/threads.rs @@ -1,6 +1,6 @@ use axum::extract::State; use conduwuit::{ - Result, at, + Result, at, debug_warn, matrix::pdu::{PduCount, PduEvent}, }; use futures::StreamExt; @@ -28,7 +28,6 @@ pub(crate) async fn get_threads_route( .transpose()? .unwrap_or_else(PduCount::max); - // TODO: bundled aggregation // TODO: user_can_see_event and set_unsigned should be at the same level / // function, so unsigned is only set for seen events. let threads: Vec<(PduCount, PduEvent)> = services @@ -45,6 +44,17 @@ pub(crate) async fn get_threads_route( .await .then_some((count, pdu)) }) + .then(|(count, mut pdu)| async move { + if let Err(e) = services + .rooms + .pdu_metadata + .add_bundled_aggregations_to_pdu(body.sender_user(), &mut pdu) + .await + { + debug_warn!("Failed to add bundled aggregations to thread: {e}"); + } + (count, pdu) + }) .collect() .await; diff --git a/src/service/rooms/pdu_metadata/bundled_aggregations.rs b/src/service/rooms/pdu_metadata/bundled_aggregations.rs new file mode 100644 index 00000000..4ef8efc1 --- /dev/null +++ b/src/service/rooms/pdu_metadata/bundled_aggregations.rs @@ -0,0 +1,394 @@ +use conduwuit::{Event, PduEvent, Result, err}; +use ruma::{ + EventId, RoomId, UserId, + api::Direction, + events::relation::{BundledMessageLikeRelations, BundledReference, ReferenceChunk}, +}; + +use super::PdusIterItem; + +const MAX_BUNDLED_RELATIONS: usize = 50; + +impl super::Service { + /// Gets bundled aggregations for an event according to the Matrix + /// specification. + /// - m.replace relations are bundled to include the most recent replacement + /// event. + /// - m.reference relations are bundled to include a chunk of event IDs. + #[tracing::instrument(skip(self), level = "debug")] + pub async fn get_bundled_aggregations( + &self, + user_id: &UserId, + room_id: &RoomId, + event_id: &EventId, + ) -> Result>>> { + let relations = self + .get_relations( + user_id, + room_id, + event_id, + conduwuit::PduCount::max(), + MAX_BUNDLED_RELATIONS, + 0, + Direction::Backward, + ) + .await; + // The relations database code still handles the basic unsigned data + // We don't want to recursively fetch relations + + // TODO: Event visibility check + // TODO: ignored users? 
+ + if relations.is_empty() { + return Ok(None); + } + + let mut replace_events = Vec::with_capacity(relations.len().min(10)); // Most events have few replacements + let mut reference_events = Vec::with_capacity(relations.len()); + + for relation in &relations { + let pdu = &relation.1; + + let content = pdu.get_content_as_value(); + if let Some(relates_to) = content.get("m.relates_to") { + // We don't check that the event relates back, because we assume the database is + // good. + if let Some(rel_type) = relates_to.get("rel_type") { + match rel_type.as_str() { + | Some("m.replace") => { + replace_events.push(relation); + }, + | Some("m.reference") => { + reference_events.push(relation); + }, + | _ => { + // Ignore other relation types for now + // Threads are in the database but not handled here + // Other types are not specified AFAICT. + }, + } + } + } + } + + // If no relations to bundle, return None + if replace_events.is_empty() && reference_events.is_empty() { + return Ok(None); + } + + let mut bundled = BundledMessageLikeRelations::new(); + + // Handle m.replace relations - find the most recent one + if !replace_events.is_empty() { + let most_recent_replacement = Self::find_most_recent_replacement(&replace_events)?; + + // Convert the replacement event to the bundled format + if let Some(replacement_pdu) = most_recent_replacement { + // According to the Matrix spec, we should include the full event as raw JSON + let replacement_json = serde_json::to_string(replacement_pdu) + .map_err(|e| err!(Database("Failed to serialize replacement event: {e}")))?; + let raw_value = serde_json::value::RawValue::from_string(replacement_json) + .map_err(|e| err!(Database("Failed to create RawValue: {e}")))?; + bundled.replace = Some(Box::new(raw_value)); + } + } + + // Handle m.reference relations - collect event IDs + if !reference_events.is_empty() { + let reference_chunk = Self::build_reference_chunk(&reference_events)?; + if !reference_chunk.is_empty() { + bundled.reference = Some(Box::new(ReferenceChunk::new(reference_chunk))); + } + } + + // TODO: Handle other relation types (m.annotation, etc.) when specified + + Ok(Some(bundled)) + } + + /// Build reference chunk for m.reference bundled aggregations + fn build_reference_chunk( + reference_events: &[&PdusIterItem], + ) -> Result> { + let mut chunk = Vec::with_capacity(reference_events.len()); + + for relation in reference_events { + let pdu = &relation.1; + + let reference_entry = BundledReference::new(pdu.event_id().to_owned()); + chunk.push(reference_entry); + } + + // Don't sort, order is unspecified + + Ok(chunk) + } + + /// Find the most recent replacement event based on origin_server_ts and + /// lexicographic event_id ordering + fn find_most_recent_replacement<'a>( + replacement_events: &'a [&'a PdusIterItem], + ) -> Result> { + if replacement_events.is_empty() { + return Ok(None); + } + + let mut most_recent: Option<&PduEvent> = None; + + // Jank, is there a better way to do this? 
+ for relation in replacement_events { + let pdu = &relation.1; + + match most_recent { + | None => { + most_recent = Some(pdu); + }, + | Some(current_most_recent) => { + // Compare by origin_server_ts first + match pdu + .origin_server_ts() + .cmp(¤t_most_recent.origin_server_ts()) + { + | std::cmp::Ordering::Greater => { + most_recent = Some(pdu); + }, + | std::cmp::Ordering::Equal => { + // If timestamps are equal, use lexicographic ordering of event_id + if pdu.event_id() > current_most_recent.event_id() { + most_recent = Some(pdu); + } + }, + | std::cmp::Ordering::Less => { + // Keep current most recent + }, + } + }, + } + } + + Ok(most_recent) + } + + /// Adds bundled aggregations to a PDU's unsigned field + #[tracing::instrument(skip(self, pdu), level = "debug")] + pub async fn add_bundled_aggregations_to_pdu( + &self, + user_id: &UserId, + pdu: &mut PduEvent, + ) -> Result<()> { + if pdu.is_redacted() { + return Ok(()); + } + + let bundled_aggregations = self + .get_bundled_aggregations(user_id, pdu.room_id(), pdu.event_id()) + .await?; + + if let Some(aggregations) = bundled_aggregations { + let aggregations_json = serde_json::to_value(aggregations) + .map_err(|e| err!(Database("Failed to serialize bundled aggregations: {e}")))?; + + Self::add_bundled_aggregations_to_unsigned(pdu, aggregations_json)?; + } + + Ok(()) + } + + /// Helper method to add bundled aggregations to a PDU's unsigned + /// field + fn add_bundled_aggregations_to_unsigned( + pdu: &mut PduEvent, + aggregations_json: serde_json::Value, + ) -> Result<()> { + use serde_json::{ + Map, Value as JsonValue, + value::{RawValue as RawJsonValue, to_raw_value}, + }; + + let mut unsigned: Map = pdu + .unsigned + .as_deref() + .map(RawJsonValue::get) + .map_or_else(|| Ok(Map::new()), serde_json::from_str) + .map_err(|e| err!(Database("Invalid unsigned in pdu event: {e}")))?; + + let relations = unsigned + .entry("m.relations") + .or_insert_with(|| JsonValue::Object(Map::new())) + .as_object_mut() + .ok_or_else(|| err!(Database("m.relations is not an object")))?; + + if let JsonValue::Object(aggregations_map) = aggregations_json { + for (rel_type, aggregation) in aggregations_map { + relations.insert(rel_type, aggregation); + } + } + + pdu.unsigned = Some(to_raw_value(&unsigned)?); + + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use conduwuit_core::pdu::{EventHash, PduEvent}; + use ruma::{UInt, events::TimelineEventType, owned_event_id, owned_room_id, owned_user_id}; + use serde_json::{Value as JsonValue, json, value::to_raw_value}; + + fn create_test_pdu(unsigned_content: Option) -> PduEvent { + PduEvent { + event_id: owned_event_id!("$test:example.com"), + room_id: owned_room_id!("!test:example.com"), + sender: owned_user_id!("@test:example.com"), + origin_server_ts: UInt::try_from(1_234_567_890_u64).unwrap(), + kind: TimelineEventType::RoomMessage, + content: to_raw_value(&json!({"msgtype": "m.text", "body": "test"})).unwrap(), + state_key: None, + prev_events: vec![], + depth: UInt::from(1_u32), + auth_events: vec![], + redacts: None, + unsigned: unsigned_content.map(|content| to_raw_value(&content).unwrap()), + hashes: EventHash { sha256: "test_hash".to_owned() }, + signatures: None, + origin: None, + } + } + + fn create_bundled_aggregations() -> JsonValue { + json!({ + "m.replace": { + "event_id": "$replace:example.com", + "origin_server_ts": 1_234_567_890, + "sender": "@replacer:example.com" + }, + "m.reference": { + "count": 5, + "chunk": [ + "$ref1:example.com", + "$ref2:example.com" + ] + } + }) + } + + #[test] + 
fn test_add_bundled_aggregations_to_unsigned_no_existing_unsigned() { + let mut pdu = create_test_pdu(None); + let aggregations = create_bundled_aggregations(); + + let result = super::super::Service::add_bundled_aggregations_to_unsigned( + &mut pdu, + aggregations.clone(), + ); + assert!(result.is_ok(), "Should succeed when no unsigned field exists"); + + assert!(pdu.unsigned.is_some(), "Unsigned field should be created"); + + let unsigned_str = pdu.unsigned.as_ref().unwrap().get(); + let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap(); + + assert!(unsigned.get("m.relations").is_some(), "m.relations should exist"); + assert_eq!( + unsigned["m.relations"], aggregations, + "Relations should match the aggregations" + ); + } + + #[test] + fn test_add_bundled_aggregations_to_unsigned_overwrite_same_relation_type() { + let existing_unsigned = json!({ + "m.relations": { + "m.replace": { + "event_id": "$old_replace:example.com", + "origin_server_ts": 1_111_111_111, + "sender": "@old_replacer:example.com" + } + } + }); + + let mut pdu = create_test_pdu(Some(existing_unsigned)); + let new_aggregations = create_bundled_aggregations(); + + let result = super::super::Service::add_bundled_aggregations_to_unsigned( + &mut pdu, + new_aggregations.clone(), + ); + assert!(result.is_ok(), "Should succeed when overwriting same relation type"); + + let unsigned_str = pdu.unsigned.as_ref().unwrap().get(); + let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap(); + + let relations = &unsigned["m.relations"]; + + assert_eq!( + relations["m.replace"], new_aggregations["m.replace"], + "m.replace should be updated" + ); + assert_eq!( + relations["m.replace"]["event_id"], "$replace:example.com", + "Should have new event_id" + ); + + assert!(relations.get("m.reference").is_some(), "New m.reference should be added"); + } + + #[test] + fn test_add_bundled_aggregations_to_unsigned_preserve_other_unsigned_fields() { + // Test case: Other unsigned fields should be preserved + let existing_unsigned = json!({ + "age": 98765, + "prev_content": {"msgtype": "m.text", "body": "old message"}, + "redacted_because": {"event_id": "$redaction:example.com"}, + "m.relations": { + "m.annotation": {"count": 1} + } + }); + + let mut pdu = create_test_pdu(Some(existing_unsigned)); + let new_aggregations = json!({ + "m.replace": {"event_id": "$new:example.com"} + }); + + let result = super::super::Service::add_bundled_aggregations_to_unsigned( + &mut pdu, + new_aggregations, + ); + assert!(result.is_ok(), "Should succeed while preserving other fields"); + + let unsigned_str = pdu.unsigned.as_ref().unwrap().get(); + let unsigned: JsonValue = serde_json::from_str(unsigned_str).unwrap(); + + // Verify all existing fields are preserved + assert_eq!(unsigned["age"], 98765, "age should be preserved"); + assert!(unsigned.get("prev_content").is_some(), "prev_content should be preserved"); + assert!( + unsigned.get("redacted_because").is_some(), + "redacted_because should be preserved" + ); + + // Verify relations were merged correctly + let relations = &unsigned["m.relations"]; + assert!( + relations.get("m.annotation").is_some(), + "Existing m.annotation should be preserved" + ); + assert!(relations.get("m.replace").is_some(), "New m.replace should be added"); + } + + #[test] + fn test_add_bundled_aggregations_to_unsigned_invalid_existing_unsigned() { + // Test case: Invalid JSON in existing unsigned should result in error + let mut pdu = create_test_pdu(None); + // Manually set invalid unsigned data + 
pdu.unsigned = Some(to_raw_value(&"invalid json").unwrap()); + + let aggregations = create_bundled_aggregations(); + let result = + super::super::Service::add_bundled_aggregations_to_unsigned(&mut pdu, aggregations); + + assert!(result.is_err(), "fails when existing unsigned is invalid"); + // Should we ignore the error and overwrite anyway? + } +} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 18221c2d..2dff54d8 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,3 +1,4 @@ +mod bundled_aggregations; mod data; use std::sync::Arc; From 9dfcf3538f200c254d83f58afc83bd213da5431a Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Wed, 4 Jun 2025 22:50:17 +0100 Subject: [PATCH 32/52] fix: Filter out invalid replacements from bundled aggregations --- .../pdu_metadata/bundled_aggregations.rs | 375 +++++++++++++++++- 1 file changed, 373 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/pdu_metadata/bundled_aggregations.rs b/src/service/rooms/pdu_metadata/bundled_aggregations.rs index 4ef8efc1..c47f637a 100644 --- a/src/service/rooms/pdu_metadata/bundled_aggregations.rs +++ b/src/service/rooms/pdu_metadata/bundled_aggregations.rs @@ -43,7 +43,10 @@ impl super::Service { return Ok(None); } - let mut replace_events = Vec::with_capacity(relations.len().min(10)); // Most events have few replacements + // Get the original event for validation of replacement events + let original_event = self.services.timeline.get_pdu(event_id).await?; + + let mut replace_events = Vec::with_capacity(relations.len()); let mut reference_events = Vec::with_capacity(relations.len()); for relation in &relations { @@ -56,7 +59,10 @@ impl super::Service { if let Some(rel_type) = relates_to.get("rel_type") { match rel_type.as_str() { | Some("m.replace") => { - replace_events.push(relation); + // Only consider valid replacements + if Self::is_valid_replacement_event(&original_event, pdu).await? { + replace_events.push(relation); + } }, | Some("m.reference") => { reference_events.push(relation); @@ -228,6 +234,56 @@ impl super::Service { Ok(()) } + + /// Validates that an event is acceptable as a replacement for another event + /// See C/S spec "Validity of replacement events" + #[tracing::instrument(level = "debug")] + async fn is_valid_replacement_event( + original_event: &PduEvent, + replacement_event: &PduEvent, + ) -> Result { + // 1. Same room_id + if original_event.room_id() != replacement_event.room_id() { + return Ok(false); + } + + // 2. Same sender + if original_event.sender() != replacement_event.sender() { + return Ok(false); + } + + // 3. Same type + if original_event.event_type() != replacement_event.event_type() { + return Ok(false); + } + + // 4. Neither event should have a state_key property + if original_event.state_key().is_some() || replacement_event.state_key().is_some() { + return Ok(false); + } + + // 5. Original event must not have rel_type of m.replace + let original_content = original_event.get_content_as_value(); + if let Some(relates_to) = original_content.get("m.relates_to") { + if let Some(rel_type) = relates_to.get("rel_type") { + if rel_type.as_str() == Some("m.replace") { + return Ok(false); + } + } + } + + // 6. 
Replacement event must have m.new_content property + // Skip this check for encrypted events, as m.new_content would be inside the + // encrypted payload + if replacement_event.event_type() != &ruma::events::TimelineEventType::RoomEncrypted { + let replacement_content = replacement_event.get_content_as_value(); + if replacement_content.get("m.new_content").is_none() { + return Ok(false); + } + } + + Ok(true) + } } #[cfg(test)] @@ -391,4 +447,319 @@ mod tests { assert!(result.is_err(), "fails when existing unsigned is invalid"); // Should we ignore the error and overwrite anyway? } + + // Test helper function to create test PDU events + fn create_test_event( + event_id: &str, + room_id: &str, + sender: &str, + event_type: TimelineEventType, + content: &JsonValue, + state_key: Option<&str>, + ) -> PduEvent { + PduEvent { + event_id: event_id.try_into().unwrap(), + room_id: room_id.try_into().unwrap(), + sender: sender.try_into().unwrap(), + origin_server_ts: UInt::try_from(1_234_567_890_u64).unwrap(), + kind: event_type, + content: to_raw_value(&content).unwrap(), + state_key: state_key.map(Into::into), + prev_events: vec![], + depth: UInt::from(1_u32), + auth_events: vec![], + redacts: None, + unsigned: None, + hashes: EventHash { sha256: "test_hash".to_owned() }, + signatures: None, + origin: None, + } + } + + /// Test that a valid replacement event passes validation + #[tokio::test] + async fn test_valid_replacement_event() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({"msgtype": "m.text", "body": "original message"}), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited message", + "m.new_content": { + "msgtype": "m.text", + "body": "edited message" + }, + "m.relates_to": { + "rel_type": "m.replace", + "event_id": "$original:example.com" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(result.unwrap(), "Valid replacement event should be accepted"); + } + + /// Test replacement event with different room ID is rejected + #[tokio::test] + async fn test_replacement_event_different_room() { + let original = create_test_event( + "$original:example.com", + "!room1:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({"msgtype": "m.text", "body": "original message"}), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room2:example.com", // Different room + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited message", + "m.new_content": { + "msgtype": "m.text", + "body": "edited message" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Different room ID should be rejected"); + } + + /// Test replacement event with different sender is rejected + #[tokio::test] + async fn test_replacement_event_different_sender() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user1:example.com", + TimelineEventType::RoomMessage, + &json!({"msgtype": "m.text", "body": "original 
message"}), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user2:example.com", // Different sender + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited message", + "m.new_content": { + "msgtype": "m.text", + "body": "edited message" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Different sender should be rejected"); + } + + /// Test replacement event with different type is rejected + #[tokio::test] + async fn test_replacement_event_different_type() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({"msgtype": "m.text", "body": "original message"}), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomTopic, // Different event type + &json!({ + "topic": "new topic", + "m.new_content": { + "topic": "new topic" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Different event type should be rejected"); + } + + /// Test replacement event with state key is rejected + #[tokio::test] + async fn test_replacement_event_with_state_key() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomName, + &json!({"name": "room name"}), + Some(""), // Has state key + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomName, + &json!({ + "name": "new room name", + "m.new_content": { + "name": "new room name" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Event with state key should be rejected"); + } + + /// Test replacement of an event that is already a replacement is rejected + #[tokio::test] + async fn test_replacement_event_original_is_replacement() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited message", + "m.relates_to": { + "rel_type": "m.replace", // Original is already a replacement + "event_id": "$some_other:example.com" + } + }), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited again", + "m.new_content": { + "msgtype": "m.text", + "body": "edited again" + } + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Replacement of replacement should be rejected"); + } + + /// Test replacement event missing m.new_content is rejected + #[tokio::test] + async fn test_replacement_event_missing_new_content() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + 
"@user:example.com", + TimelineEventType::RoomMessage, + &json!({"msgtype": "m.text", "body": "original message"}), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomMessage, + &json!({ + "msgtype": "m.text", + "body": "* edited message" + // Missing m.new_content + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!(!result.unwrap(), "Missing m.new_content should be rejected"); + } + + /// Test encrypted replacement event without m.new_content is accepted + #[tokio::test] + async fn test_replacement_event_encrypted_missing_new_content_is_valid() { + let original = create_test_event( + "$original:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomEncrypted, + &json!({ + "algorithm": "m.megolm.v1.aes-sha2", + "ciphertext": "encrypted_payload_base64", + "sender_key": "sender_key", + "session_id": "session_id" + }), + None, + ); + + let replacement = create_test_event( + "$replacement:example.com", + "!room:example.com", + "@user:example.com", + TimelineEventType::RoomEncrypted, + &json!({ + "algorithm": "m.megolm.v1.aes-sha2", + "ciphertext": "encrypted_replacement_payload_base64", + "sender_key": "sender_key", + "session_id": "session_id", + "m.relates_to": { + "rel_type": "m.replace", + "event_id": "$original:example.com" + } + // No m.new_content in cleartext - this is valid for encrypted events + }), + None, + ); + + let result = + super::super::Service::is_valid_replacement_event(&original, &replacement).await; + assert!(result.is_ok(), "Validation should succeed"); + assert!( + result.unwrap(), + "Encrypted replacement without cleartext m.new_content should be accepted" + ); + } } From bc901aece71f8fe50d1f78ebca180fc0ad460fe7 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Fri, 25 Apr 2025 21:06:00 -0700 Subject: [PATCH 33/52] probably incorrectly delete support for non-standardized matrix srv record --- src/service/resolver/actual.rs | 37 ++++++++++++++++------------------ 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index d23ef95a..52cd5d7d 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -306,28 +306,25 @@ impl super::Service { #[tracing::instrument(name = "srv", level = "debug", skip(self))] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { - let hostnames = - [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; + self.services.server.check_running()?; - for hostname in hostnames { - self.services.server.check_running()?; + debug!("querying SRV for {hostname:?}"); - debug!("querying SRV for {hostname:?}"); - let hostname = hostname.trim_end_matches('.'); - match self.resolver.resolver.srv_lookup(hostname).await { - | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => { - return Ok(result.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()) - .as_str() - .try_into() - .unwrap_or_else(|_| FedDest::default_port()), - ) - })); - }, - } + let hostname_suffix = format!("_matrix-fed._tcp.{hostname}."); + let hostname = hostname_suffix.trim_end_matches('.'); + match self.resolver.resolver.srv_lookup(hostname).await { + | Err(e) => 
Self::handle_resolve_error(&e, hostname)?, + | Ok(result) => { + return Ok(result.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()) + .as_str() + .try_into() + .unwrap_or_else(|_| FedDest::default_port()), + ) + })); + }, } Ok(None) From cc1b841eb3ba23ae8249d73710239bc384b2e11c Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Wed, 14 May 2025 06:53:00 -0700 Subject: [PATCH 34/52] bump the number of allowed immutable memtables by 1, to allow for greater flood protection this should probably not be applied if you have rocksdb_atomic_flush = false (the default) --- src/database/engine/cf_opts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index cbbd1012..666f9f9e 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -29,7 +29,7 @@ fn descriptor_cf_options( set_table_options(&mut opts, &desc, cache)?; opts.set_min_write_buffer_number(1); - opts.set_max_write_buffer_number(2); + opts.set_max_write_buffer_number(3); opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); From 7da679328bfa300706973d9b47e8022e462077e0 Mon Sep 17 00:00:00 2001 From: Jason Volk Date: Sat, 26 Apr 2025 08:23:57 +0000 Subject: [PATCH 35/52] Mitigate large futures Signed-off-by: Jason Volk --- src/admin/room/moderation.rs | 10 ++++++++-- src/admin/user/commands.rs | 4 +++- src/api/client/account.rs | 8 ++++++-- src/api/client/membership.rs | 15 ++++++++++++--- src/api/client/state.rs | 1 + src/service/admin/mod.rs | 3 +-- 6 files changed, 31 insertions(+), 10 deletions(-) diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index ee429fc6..af6cf928 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -155,7 +155,10 @@ async fn ban_room(&self, room: OwnedRoomOrAliasId) -> Result { evicting admins too)", ); - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + if let Err(e) = leave_room(self.services, user_id, &room_id, None) + .boxed() + .await + { warn!("Failed to leave room: {e}"); } @@ -323,7 +326,10 @@ async fn ban_list_of_rooms(&self) -> Result { evicting admins too)", ); - if let Err(e) = leave_room(self.services, user_id, &room_id, None).await { + if let Err(e) = leave_room(self.services, user_id, &room_id, None) + .boxed() + .await + { warn!("Failed to leave room: {e}"); } diff --git a/src/admin/user/commands.rs b/src/admin/user/commands.rs index d094fc5f..79e0b0f8 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -696,7 +696,9 @@ pub(super) async fn force_leave_room( return Err!("{user_id} is not joined in the room"); } - leave_room(self.services, &user_id, &room_id, None).await?; + leave_room(self.services, &user_id, &room_id, None) + .boxed() + .await?; self.write_str(&format!("{user_id} has left {room_id}.",)) .await diff --git a/src/api/client/account.rs b/src/api/client/account.rs index 32f2530c..05dfa8b7 100644 --- a/src/api/client/account.rs +++ b/src/api/client/account.rs @@ -763,7 +763,9 @@ pub(crate) async fn deactivate_route( super::update_displayname(&services, sender_user, None, &all_joined_rooms).await; super::update_avatar_url(&services, sender_user, None, None, &all_joined_rooms).await; - full_user_deactivate(&services, sender_user, &all_joined_rooms).await?; + full_user_deactivate(&services, sender_user, &all_joined_rooms) + .boxed() + .await?; info!("User 
{sender_user} deactivated their account."); @@ -915,7 +917,9 @@ pub async fn full_user_deactivate( } } - super::leave_all_rooms(services, user_id).await; + super::leave_all_rooms(services, user_id) + .boxed() + .await; Ok(()) } diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index e6392533..b24a6721 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -114,7 +114,9 @@ async fn banned_room_check( .collect() .await; - full_user_deactivate(services, user_id, &all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; } return Err!(Request(Forbidden("This room is banned on this homeserver."))); @@ -153,7 +155,9 @@ async fn banned_room_check( .collect() .await; - full_user_deactivate(services, user_id, &all_joined_rooms).await?; + full_user_deactivate(services, user_id, &all_joined_rooms) + .boxed() + .await?; } return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); @@ -265,6 +269,7 @@ pub(crate) async fn join_room_by_id_or_alias_route( room_id.server_name(), client, ) + .boxed() .await?; let mut servers = body.via.clone(); @@ -487,6 +492,7 @@ pub(crate) async fn leave_room_route( body: Ruma, ) -> Result { leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .boxed() .await .map(|()| leave_room::v3::Response::new()) } @@ -1825,7 +1831,10 @@ pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { for room_id in all_rooms { // ignore errors - if let Err(e) = leave_room(services, user_id, &room_id, None).await { + if let Err(e) = leave_room(services, user_id, &room_id, None) + .boxed() + .await + { warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); } diff --git a/src/api/client/state.rs b/src/api/client/state.rs index 07802b1b..d1364b8d 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -63,6 +63,7 @@ pub(crate) async fn send_state_event_for_empty_key_route( body: Ruma, ) -> Result> { send_state_event_for_key_route(State(services), body) + .boxed() .await .map(RumaResponse) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 86e12c3c..c8d8b3b8 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -4,7 +4,6 @@ mod execute; mod grant; use std::{ - future::Future, pin::Pin, sync::{Arc, RwLock as StdRwLock, Weak}, }; @@ -14,7 +13,7 @@ use conduwuit::{ Error, PduEvent, Result, Server, debug, err, error, error::default_log, pdu::PduBuilder, }; pub use create::create_admin_room; -use futures::{FutureExt, TryFutureExt}; +use futures::{Future, FutureExt, TryFutureExt}; use loole::{Receiver, Sender}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, UserId, From 0c2d8aa7bfdcb9d3da6ee68600b2d7595760e0a4 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Thu, 22 May 2025 16:54:15 -0700 Subject: [PATCH 36/52] add futures::FutureExt to make cb15ac3c0169cc57be584698ec03fbbe1ee9ffce work --- src/admin/room/moderation.rs | 1 + src/admin/user/commands.rs | 1 + src/api/client/state.rs | 1 + 3 files changed, 3 insertions(+) diff --git a/src/admin/room/moderation.rs b/src/admin/room/moderation.rs index af6cf928..4c19da5c 100644 --- a/src/admin/room/moderation.rs +++ b/src/admin/room/moderation.rs @@ -6,6 +6,7 @@ use conduwuit::{ warn, }; use futures::StreamExt; +use futures::FutureExt; use ruma::{OwnedRoomId, OwnedRoomOrAliasId, RoomAliasId, RoomId, RoomOrAliasId}; use crate::{admin_command, admin_command_dispatch, get_room_info}; diff --git a/src/admin/user/commands.rs 
b/src/admin/user/commands.rs index 79e0b0f8..5b02da54 100644 --- a/src/admin/user/commands.rs +++ b/src/admin/user/commands.rs @@ -9,6 +9,7 @@ use conduwuit::{ }; use conduwuit_api::client::{leave_all_rooms, update_avatar_url, update_displayname}; use futures::StreamExt; +use futures::FutureExt; use ruma::{ OwnedEventId, OwnedRoomId, OwnedRoomOrAliasId, OwnedUserId, UserId, events::{ diff --git a/src/api/client/state.rs b/src/api/client/state.rs index d1364b8d..d0b210c8 100644 --- a/src/api/client/state.rs +++ b/src/api/client/state.rs @@ -6,6 +6,7 @@ use conduwuit::{ }; use conduwuit_service::Services; use futures::TryStreamExt; +use futures::FutureExt; use ruma::{ OwnedEventId, RoomId, UserId, api::client::state::{get_state_events, get_state_events_for_key, send_state_event}, From c93228b8b7e477f11a8d0d1dd288faeef6bb561c Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 08:02:05 -0700 Subject: [PATCH 37/52] upgrade some settings to enable 5g in continuwuity enable converged 6g at the edge in continuwuity better stateinfo_cache_capacity default better roomid_spacehierarchy_cache_capacity make sender workers default better and clamp value to core count update sender workers documentation add more parallelism_scaled and make them public update 1 document --- conduwuit-example.toml | 6 +-- src/core/config/mod.rs | 75 +++++++++++++++++++------------------- src/service/sending/mod.rs | 12 ++---- 3 files changed, 43 insertions(+), 50 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 1a8be2aa..3c18f027 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1590,11 +1590,9 @@ #stream_amplification = 1024 # Number of sender task workers; determines sender parallelism. Default is -# '0' which means the value is determined internally, likely matching the -# number of tokio worker-threads or number of cores, etc. Override by -# setting a non-zero value. +# number of CPU cores. Override by setting a different value. # -#sender_workers = 0 +#sender_workers = 4 # Enables listener sockets; can be set to false to disable listening. This # option is intended for developer/diagnostic purposes only. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index d4a10345..4ff87a74 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1823,12 +1823,10 @@ pub struct Config { pub stream_amplification: usize, /// Number of sender task workers; determines sender parallelism. Default is - /// '0' which means the value is determined internally, likely matching the - /// number of tokio worker-threads or number of cores, etc. Override by - /// setting a non-zero value. + /// '4'. Override by setting a different value. Values clamped 1 to core count. /// - /// default: 0 - #[serde(default)] + /// default: 4 + #[serde(default = "default_sender_workers")] pub sender_workers: usize, /// Enables listener sockets; can be set to false to disable listening. 
This @@ -2059,45 +2057,47 @@ fn default_database_backups_to_keep() -> i16 { 1 } fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) } -fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) } +fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) } -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) } fn default_cache_capacity_modifier() -> f64 { 1.0 } fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) + parallelism_scaled_u32(50_000).saturating_add(500_000) } fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(100_000) -} - -fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } -fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } +fn default_eventidshort_cache_capacity() -> u32 { + parallelism_scaled_u32(100_000).saturating_add(500_000) +} -fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } +fn default_eventid_pdu_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} -fn default_dns_cache_entries() -> u32 { 32768 } +fn default_shortstatekey_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} + +fn default_statekeyshort_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} + +fn default_servernameevent_data_cache_capacity() -> u32 { + parallelism_scaled_u32(200_000).saturating_add(500_000) +} + +fn default_stateinfo_cache_capacity() -> u32 { + parallelism_scaled_u32(500).clamp(100, 12000) } + +fn default_roomid_spacehierarchy_cache_capacity() -> u32 { + parallelism_scaled_u32(500).clamp(100, 12000) } + +fn default_dns_cache_entries() -> u32 { 327680 } fn default_dns_min_ttl() -> u64 { 60 * 180 } @@ -2286,14 +2286,13 @@ fn default_admin_log_capture() -> String { fn default_admin_room_tag() -> String { "m.server_notice".to_owned() } #[allow(clippy::as_conversions, clippy::cast_precision_loss)] -fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) } +pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) } -fn parallelism_scaled_u32(val: u32) -> u32 { - let val = val.try_into().expect("failed to cast u32 to usize"); - parallelism_scaled(val).try_into().unwrap_or(u32::MAX) -} +pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) } -fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } +pub fn parallelism_scaled_i32(val: i32) -> i32 { val.saturating_mul(sys::available_parallelism() as i32) } + +pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } fn default_trusted_server_batch_size() -> usize { 
256 } @@ -2313,6 +2312,8 @@ fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } +fn default_sender_workers() -> usize { 4 } + fn default_client_receive_timeout() -> u64 { 75 } fn default_client_request_timeout() -> u64 { 180 } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 08ca7010..ce687551 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -401,16 +401,10 @@ impl Service { fn num_senders(args: &crate::Args<'_>) -> usize { const MIN_SENDERS: usize = 1; - // Limit the number of senders to the number of workers threads or number of - // cores, conservatively. - let max_senders = args - .server - .metrics - .num_workers() - .min(available_parallelism()); + // Limit the maximum number of senders to the number of cores. + let max_senders = available_parallelism(); - // If the user doesn't override the default 0, this is intended to then default - // to 1 for now as multiple senders is experimental. + // default is 4 senders. clamp between 1 and core count. args.server .config .sender_workers From 6e9e252391f715d060892578a449d83232023aa2 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 7 Jun 2025 00:46:55 +0100 Subject: [PATCH 38/52] fix an auth rule not applying correctly --- src/core/matrix/state_res/event_auth.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 759ab5cb..855a6f60 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -255,6 +255,16 @@ where }, | Some(e) => e, }; + // just re-check 1.2 to work around a bug + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of m.room.create sender"); + return Ok(false); + } // 3. If event does not have m.room.create in auth_events reject if !incoming_event From 4ff1553b5690d10604f2a81754696b6b8e17be67 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 7 Jun 2025 00:55:03 +0100 Subject: [PATCH 39/52] Note about ruma#2064 in TODO --- src/core/matrix/state_res/event_auth.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 855a6f60..0c47d3fc 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -217,8 +217,9 @@ where } /* - // TODO: In the past this code caused problems federating with synapse, maybe this has been - // resolved already. Needs testing. + // TODO: In the past this code was commented as it caused problems with Synapse. This is no + // longer the case. This needs to be implemented. + // See also: https://github.com/ruma/ruma/pull/2064 // // 2. Reject if auth_events // a. 
auth_events cannot have duplicate keys since it's a BTree From cfc5fab813faa33d6a24d857afe27c28ba8b8a51 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 10 Jun 2025 22:33:31 +0100 Subject: [PATCH 40/52] Kick up a fuss when m.room.create is unfindable --- src/core/matrix/state_res/event_auth.rs | 4 ++-- src/core/matrix/state_res/mod.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 0c47d3fc..a6e68a1d 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -30,7 +30,7 @@ use super::{ }, room_version::RoomVersion, }; -use crate::{debug, error, trace, warn}; +use crate::{debug, err_log, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -251,7 +251,7 @@ where let room_create_event = match room_create_event { | None => { - warn!("no m.room.create event in auth chain"); + error!("no m.room.create event in auth chain for {}!", incoming_event.event_id()); return Ok(false); }, | Some(e) => e, diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 651f6130..38c7c259 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -746,7 +746,7 @@ where } } } - // Did not find a power level event so we default to zero + warn!("could not find a power event in the mainline map, defaulting to zero depth"); Ok(0) } From f0c6391eaa3ed71cb52047856055224966e654cb Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 10 Jun 2025 23:00:09 +0100 Subject: [PATCH 41/52] Fix room ID check --- src/core/matrix/state_res/event_auth.rs | 11 +++++++---- src/service/rooms/event_handler/handle_outlier_pdu.rs | 5 +---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index a6e68a1d..4660b539 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -30,7 +30,7 @@ use super::{ }, room_version::RoomVersion, }; -use crate::{debug, err_log, error, trace, warn}; +use crate::{debug, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -251,7 +251,7 @@ where let room_create_event = match room_create_event { | None => { - error!("no m.room.create event in auth chain for {}!", incoming_event.event_id()); + error!("no m.room.create event found for {}!", incoming_event.event_id()); return Ok(false); }, | Some(e) => e, @@ -262,8 +262,11 @@ where return Ok(false); }; - if room_id_server_name != sender.server_name() { - warn!("servername of room ID does not match servername of m.room.create sender"); + if room_id_server_name != room_create_event.sender().server_name() { + warn!( + "servername of room ID origin ({}) does not match servername of m.room.create sender ({})", + room_id_server_name, + room_create_event.sender().server_name()); return Ok(false); } diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index 5339249d..f9889d58 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -119,10 +119,7 @@ pub(super) async fn handle_outlier_pdu<'a>( } // The original create event must be in the auth events - if !matches!( - auth_events.get(&(StateEventType::RoomCreate, String::new().into())), - Some(_) | None - ) { + if 
!auth_events.contains_key(&(StateEventType::RoomCreate, String::new().into())) { return Err!(Request(InvalidParam("Incoming event refers to wrong create event."))); } From 70ba586ffd2a8ec196c7dc56580533868eb79b3c Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 00:45:57 +0100 Subject: [PATCH 42/52] Unsafe, untested, and potentially overeager PDU sanity checks --- src/core/matrix/state_res/event_auth.rs | 8 +++++--- src/service/rooms/timeline/mod.rs | 14 ++++++++++++++ 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 4660b539..667b1b36 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -219,7 +219,7 @@ where /* // TODO: In the past this code was commented as it caused problems with Synapse. This is no // longer the case. This needs to be implemented. - // See also: https://github.com/ruma/ruma/pull/2064 + // See also: https://github.com/ruma/ruma/pull/2064 // // 2. Reject if auth_events // a. auth_events cannot have duplicate keys since it's a BTree @@ -264,9 +264,11 @@ where if room_id_server_name != room_create_event.sender().server_name() { warn!( - "servername of room ID origin ({}) does not match servername of m.room.create sender ({})", + "servername of room ID origin ({}) does not match servername of m.room.create \ + sender ({})", room_id_server_name, - room_create_event.sender().server_name()); + room_create_event.sender().server_name() + ); return Ok(false); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index ce99a829..fef7f414 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -700,6 +700,20 @@ impl Service { .await .saturating_add(uint!(1)); + if state_key.is_none() { + if prev_events.is_empty() { + warn!("Timeline event had zero prev_events, something broke."); + return Err!(Request(Unknown("Timeline event had zero prev_events."))); + } + if depth.le(&uint!(2)) { + warn!( + "Had unsafe depth of {depth} in {room_id} when creating non-state event. \ + Bad!" 
+ ); + return Err!(Request(Unknown("Unsafe depth for non-state event."))); + } + }; + let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { From 3a634ff1b2f4433b82512fa781a59e58cf5d4f62 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 01:27:25 +0100 Subject: [PATCH 43/52] more logs --- src/core/matrix/state_res/event_auth.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 667b1b36..0726b00a 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -13,6 +13,7 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, third_party_invite::RoomThirdPartyInviteEventContent, }, + EventId, int, serde::{Base64, Raw}, }; @@ -21,7 +22,6 @@ use serde::{ de::{Error as _, IgnoredAny}, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; - use super::{ Error, Event, Result, StateEventType, StateKey, TimelineEventType, power_levels::{ @@ -251,7 +251,14 @@ where let room_create_event = match room_create_event { | None => { - error!("no m.room.create event found for {}!", incoming_event.event_id()); + error!( + create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + "no m.room.create event found for {} ({})!", + incoming_event.event_id().as_str(), + incoming_event.room_id().as_str() + ); return Ok(false); }, | Some(e) => e, From 09b5c3420e859b8964629c6ada9eafcd6781ee62 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 01:42:19 +0100 Subject: [PATCH 44/52] log which room struggled to get mainline depth --- src/core/matrix/state_res/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 38c7c259..a2052b92 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -726,8 +726,12 @@ where Fut: Future> + Send, E: Event + Send + Sync, { + let mut room_id = None; while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().as_str(), "mainline"); + if room_id.is_none() { + room_id = Some(sort_ev.room_id().to_owned()); + } let id = sort_ev.event_id(); if let Some(depth) = mainline_map.get(id) { @@ -746,7 +750,7 @@ where } } } - warn!("could not find a power event in the mainline map, defaulting to zero depth"); + warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth"); Ok(0) } From 5c4a9942f4d373766375bc3924decb07453a448e Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 19:53:46 +0100 Subject: [PATCH 45/52] When in doubt, log all the things --- src/api/client/membership.rs | 1 + src/core/matrix/state_res/mod.rs | 2 +- .../rooms/event_handler/handle_outlier_pdu.rs | 7 +++--- .../event_handler/upgrade_outlier_pdu.rs | 22 ++++++++----------- src/service/rooms/timeline/mod.rs | 1 + 5 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs index b24a6721..300b6b93 100644 --- a/src/api/client/membership.rs +++ b/src/api/client/membership.rs @@ -1282,6 +1282,7 @@ async fn 
join_room_by_id_helper_remote( services.rooms.timeline.get_pdu(event_id).await.ok() }; + debug!("running stateres check on send_join parsed PDU"); let auth_check = state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id)?, &parsed_join_pdu, diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index a2052b92..516115d9 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -609,7 +609,7 @@ where let fetch_state = |ty: &StateEventType, key: &str| { future::ready(auth_state.get(&ty.with_state_key(key))) }; - + debug!("running auth check on {:?}", event.event_id()); let auth_result = auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index f9889d58..87b76222 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -76,7 +76,7 @@ pub(super) async fn handle_outlier_pdu<'a>( // 5. Reject "due to auth events" if can't get all the auth events or some of // the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events"); + debug!("Fetching auth events for {}", incoming_pdu.event_id); Box::pin(self.fetch_and_handle_outliers( origin, &incoming_pdu.auth_events, @@ -88,12 +88,12 @@ pub(super) async fn handle_outlier_pdu<'a>( // 6. Reject "due to auth events" if the event doesn't pass auth based on the // auth events - debug!("Checking based on auth events"); + debug!("Checking {} based on auth events", incoming_pdu.event_id); // Build map of auth events let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len()); for id in &incoming_pdu.auth_events { let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { - warn!("Could not find auth event {id}"); + warn!("Could not find auth event {id} for {}", incoming_pdu.event_id); continue; }; @@ -128,6 +128,7 @@ pub(super) async fn handle_outlier_pdu<'a>( ready(auth_events.get(&key)) }; + debug!("running auth check to handle outlier pdu {:?}", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &to_room_version(&room_version_id), &incoming_pdu, diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 97d3df97..0dca2d70 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,12 +1,6 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; -use conduwuit::{ - Err, Result, debug, debug_info, err, implement, - matrix::{EventTypeExt, PduEvent, StateKey, state_res}, - trace, - utils::stream::{BroadbandExt, ReadyExt}, - warn, -}; +use conduwuit::{Err, Result, debug, debug_info, err, implement, matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, info}; use futures::{FutureExt, StreamExt, future::ready}; use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; @@ -44,7 +38,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( return Err!(Request(InvalidParam("Event has been soft failed"))); } - debug!("Upgrading to timeline pdu"); + debug!("Upgrading pdu {} from outlier to timeline pdu", incoming_pdu.event_id); let timer = Instant::now(); let room_version_id = 
get_room_version_id(create_event)?; @@ -52,7 +46,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // backwards extremities doing all the checks in this list starting at 1. // These are not timeline events. - debug!("Resolving state at event"); + debug!("Resolving state at event {}", incoming_pdu.event_id); let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { self.state_at_incoming_degree_one(&incoming_pdu).await? } else { @@ -70,7 +64,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event.expect("we always set this to some above"); let room_version = to_room_version(&room_version_id); - debug!("Performing auth check"); + debug!("Performing auth check to upgrade {}", incoming_pdu.event_id); // 11. Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; let state_fetch = |k: StateEventType, s: StateKey| async move { @@ -80,6 +74,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( self.services.timeline.get_pdu(event_id).await.ok() }; + debug!("running auth check on {}", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -93,7 +88,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); } - debug!("Gathering auth events"); + debug!("Gathering auth events for {}", incoming_pdu.event_id); let auth_events = self .services .state @@ -111,6 +106,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( ready(auth_events.get(&key).cloned()) }; + debug!("running auth check on {} with claimed state auth", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -121,7 +117,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; // Soft fail check before doing state res - debug!("Performing soft-fail check"); + debug!("Performing soft-fail check on {}", incoming_pdu.event_id); let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { | (false, _) => true, | (true, None) => false, @@ -218,7 +214,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( // 14. 
Check if the event passes auth based on the "current state" of the room, // if not soft fail it if soft_fail { - debug!("Soft failing event"); + info!("Soft failing event {}", incoming_pdu.event_id); let extremities = extremities.iter().map(Borrow::borrow); self.services diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index fef7f414..7689c8d1 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -785,6 +785,7 @@ impl Service { ready(auth_events.get(&key)) }; + debug!("running auth check on new {} event by {} in {}", pdu.kind, pdu.sender, pdu.room_id); let auth_check = state_res::auth_check( &room_version, &pdu, From 513b057e9da50dbfe2d92b8d11e30f12893d8c25 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Thu, 12 Jun 2025 01:05:45 +0100 Subject: [PATCH 46/52] modify more log strings so they're more useful than not --- src/core/matrix/state_res/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 516115d9..ea49b29e 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -728,7 +728,7 @@ where { let mut room_id = None; while let Some(sort_ev) = event { - debug!(event_id = sort_ev.event_id().as_str(), "mainline"); + trace!(event_id = sort_ev.event_id().as_str(), "mainline"); if room_id.is_none() { room_id = Some(sort_ev.room_id().to_owned()); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7689c8d1..4b0b46b1 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1167,7 +1167,7 @@ impl Service { .boxed(); while let Some(ref backfill_server) = servers.next().await { - info!("Asking {backfill_server} for backfill"); + info!("Asking {backfill_server} for backfill in {:?}", room_id.to_owned()); let response = self .services .sending @@ -1195,7 +1195,7 @@ impl Service { } } - info!("No servers could backfill, but backfill was needed in room {room_id}"); + warn!("No servers could backfill, but backfill was needed in room {room_id}"); Ok(()) } From 05b0f2e365a72cf11b77760f241f83708c8948ec Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 08:02:49 -0700 Subject: [PATCH 47/52] change rocksdb stats level to 3 scale rocksdb background jobs and subcompactions change rocksdb default error level to info from error delete unused num_threads function fix warns from cargo --- conduwuit-example.toml | 2 +- src/core/config/mod.rs | 6 +++--- src/database/engine/db_opts.rs | 22 ++++------------------ 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 3c18f027..287bf65f 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -990,7 +990,7 @@ # 3 to 5 = Statistics with possible performance impact. # 6 = All statistics. # -#rocksdb_stats_level = 1 +#rocksdb_stats_level = 3 # This is a password that can be configured that will let you login to the # server bot account (currently `@conduit`) for emergency troubleshooting diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 4ff87a74..14e8fa29 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1155,7 +1155,7 @@ pub struct Config { /// 3 to 5 = Statistics with possible performance impact. /// 6 = All statistics. 
/// - /// default: 1 + /// default: 3 #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, @@ -2199,7 +2199,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 } fn default_rocksdb_recovery_mode() -> u8 { 1 } -fn default_rocksdb_log_level() -> String { "error".to_owned() } +fn default_rocksdb_log_level() -> String { "info".to_owned() } fn default_rocksdb_log_time_to_roll() -> usize { 0 } @@ -2231,7 +2231,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 } #[allow(clippy::doc_markdown)] fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 } -fn default_rocksdb_stats_level() -> u8 { 1 } +fn default_rocksdb_stats_level() -> u8 { 3 } // I know, it's a great name #[must_use] diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 18cec742..1299443d 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,8 +1,6 @@ -use std::{cmp, convert::TryFrom}; - -use conduwuit::{Config, Result, utils}; +use conduwuit::{Config, Result}; use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; - +use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; /// Create database-wide options suitable for opening the database. This also @@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul set_logging_defaults(&mut opts, config); // Processing - opts.set_max_background_jobs(num_threads::(config)?); - opts.set_max_subcompactions(num_threads::(config)?); + opts.set_max_background_jobs(parallelism_scaled_i32(1)); + opts.set_max_subcompactions(parallelism_scaled_u32(1)); opts.set_avoid_unnecessary_blocking_io(true); opts.set_max_file_opening_threads(0); @@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) { opts.set_callback_logger(rocksdb_log_level, &handle_log); } } - -fn num_threads>(config: &Config) -> Result { - const MIN_PARALLELISM: usize = 2; - - let requested = if config.rocksdb_parallelism_threads != 0 { - config.rocksdb_parallelism_threads - } else { - utils::available_parallelism() - }; - - utils::math::try_into::(cmp::max(MIN_PARALLELISM, requested)) -} From 6958bc150bb9f637244cdfb6442d37ac8437ba0f Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Wed, 18 Jun 2025 12:48:27 -0700 Subject: [PATCH 48/52] make fetching key room events less smart --- src/core/matrix/state_res/event_auth.rs | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 0726b00a..67283b6a 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -242,12 +242,16 @@ where } */ - let (room_create_event, power_levels_event, sender_member_event) = join3( - fetch_state(&StateEventType::RoomCreate, ""), - fetch_state(&StateEventType::RoomPowerLevels, ""), - fetch_state(&StateEventType::RoomMember, sender.as_str()), - ) - .await; + // let (room_create_event, power_levels_event, sender_member_event) = join3( + // fetch_state(&StateEventType::RoomCreate, ""), + // fetch_state(&StateEventType::RoomPowerLevels, ""), + // fetch_state(&StateEventType::RoomMember, sender.as_str()), + // ) + // .await; + + let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await; + let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await; + let sender_member_event = 
fetch_state(&StateEventType::RoomMember, sender.as_str()).await; let room_create_event = match room_create_event { | None => { From 8111b89d477d2757d869d3290513dfbc7f234279 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Wed, 18 Jun 2025 16:57:19 -0700 Subject: [PATCH 49/52] lock the getter instead ??? c/o M --- src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 +- src/service/rooms/state/mod.rs | 1 + src/service/rooms/timeline/mod.rs | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 0dca2d70..cbebee4b 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -141,7 +141,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( let extremities: Vec<_> = self .services .state - .get_forward_extremities(room_id) + .get_forward_extremities(room_id, &state_lock) .map(ToOwned::to_owned) .ready_filter(|event_id| { // Remove any that are referenced by this incoming event's prev_events diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 803ba9d7..056da5e6 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -384,6 +384,7 @@ impl Service { pub fn get_forward_extremities<'a>( &'a self, room_id: &'a RoomId, + _state_lock: &'a RoomMutexGuard, ) -> impl Stream + Send + '_ { let prefix = (room_id, Interfix); diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 4b0b46b1..a3709533 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -656,7 +656,7 @@ impl Service { let prev_events: Vec = self .services .state - .get_forward_extremities(room_id) + .get_forward_extremities(room_id, _mutex_lock) .take(20) .map(Into::into) .collect() From 66c2fe8315449862c673d00ed9ccb44b5d7a507d Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 06:52:40 -0700 Subject: [PATCH 50/52] vehicle loan documentation now available at window 7 --- src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index cbebee4b..d592debe 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -159,6 +159,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu( .collect() .await; + if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events", incoming_pdu.prev_events.len()) } + debug!( "Retained {} extremities checked against {} prev_events", extremities.len(), From 4d491335044fe70ce8c7ec343ef3630cafc95d66 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 08:13:30 -0700 Subject: [PATCH 51/52] sender_workers scaling. this time, with feeling! --- src/core/config/mod.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 14e8fa29..ce6d6046 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1823,9 +1823,9 @@ pub struct Config { pub stream_amplification: usize, /// Number of sender task workers; determines sender parallelism. Default is - /// '4'. Override by setting a different value. Values clamped 1 to core count. + /// core count. Override by setting a different value. 
/// - /// default: 4 + /// default: core count #[serde(default = "default_sender_workers")] pub sender_workers: usize, @@ -2312,7 +2312,7 @@ fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } -fn default_sender_workers() -> usize { 4 } +fn default_sender_workers() -> usize { parallelism_scaled(1) } fn default_client_receive_timeout() -> u64 { 75 } From cda40e8f9f7546e72b90dccb95938ccf4f96ae78 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Mon, 30 Jun 2025 15:25:11 -0700 Subject: [PATCH 52/52] more funny settings (part 3 of 12) --- src/core/config/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index ce6d6046..e19a5974 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2059,40 +2059,41 @@ fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64( fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) } -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) } +fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_cache_capacity_modifier() -> f64 { 1.0 } fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_servernameevent_data_cache_capacity() -> u32 { - parallelism_scaled_u32(200_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_stateinfo_cache_capacity() -> u32 { - parallelism_scaled_u32(500).clamp(100, 12000) } + parallelism_scaled_u32(500).clamp(100, 12000) +} fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(500).clamp(100, 12000) }
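
The cache and worker defaults changed across patches 37, 51 and 52 all follow one shape: a per-core base multiplied by the detected core count, usually with a fixed floor added on top, and the sender worker count clamped between 1 and the core count (with the default now equal to the core count). The standalone sketch below illustrates that shape; it uses std::thread::available_parallelism rather than conduwuit's own sys helper, and the names scaled_cache_capacity and effective_sender_workers are illustrative only, not the functions used in the patches.

    use std::thread::available_parallelism;

    /// Core count the runtime reports, falling back to 1 if detection fails.
    fn parallelism() -> usize {
        available_parallelism().map(|n| n.get()).unwrap_or(1)
    }

    /// Scale a per-core base value by the detected core count, saturating on overflow.
    fn parallelism_scaled_u32(val: u32) -> u32 {
        val.saturating_mul(parallelism() as u32)
    }

    /// Shape shared by the cache defaults in the patches:
    /// `per_core * cores + floor`, e.g. 50_000 per core plus a 100_000 floor.
    fn scaled_cache_capacity(per_core: u32, floor: u32) -> u32 {
        parallelism_scaled_u32(per_core).saturating_add(floor)
    }

    /// Sender workers: the configured value clamped between 1 and the core count.
    fn effective_sender_workers(configured: usize) -> usize {
        const MIN_SENDERS: usize = 1;
        configured.clamp(MIN_SENDERS, parallelism())
    }

    fn main() {
        println!("cores: {}", parallelism());
        println!("pdu cache default: {}", scaled_cache_capacity(50_000, 100_000));
        println!("sender_workers(0)  -> {}", effective_sender_workers(0));
        println!("sender_workers(64) -> {}", effective_sender_workers(64));
    }

On an 8-core host this yields a PDU cache default of 500_000 entries and caps sender_workers at 8 no matter how large a value is configured.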
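
The auth change in patches 38 and 41 reduces to comparing the server name embedded in the room ID with the server name of the m.room.create sender, rejecting the event when they differ or when the room ID carries no server name at all. The sketch below reproduces that comparison over plain strings; the real code goes through ruma's typed RoomId and UserId accessors, so treat this as a simplified model of the rule rather than the implementation.

    /// Extract the server-name portion of a Matrix identifier such as
    /// `!room:example.org` or `@user:example.org` (everything after the first ':').
    /// Returns None when the identifier carries no server name.
    fn server_name(id: &str) -> Option<&str> {
        id.split_once(':')
            .map(|(_, server)| server)
            .filter(|s| !s.is_empty())
    }

    /// The check added by the patch, on plain strings: reject the event when the
    /// room ID's origin server differs from the m.room.create sender's server.
    fn room_origin_matches_create_sender(room_id: &str, create_sender: &str) -> bool {
        match (server_name(room_id), server_name(create_sender)) {
            (Some(room_server), Some(sender_server)) => room_server == sender_server,
            // A room ID without a server name cannot satisfy the check.
            _ => false,
        }
    }

    fn main() {
        assert!(room_origin_matches_create_sender("!abc:example.org", "@alice:example.org"));
        assert!(!room_origin_matches_create_sender("!abc:example.org", "@mallory:evil.example"));
        assert!(!room_origin_matches_create_sender("!abc", "@alice:example.org"));
        println!("room-origin checks behaved as expected");
    }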
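
Patch 49 changes get_forward_extremities to take an unused _state_lock guard, so a read of the forward extremities only type-checks while the per-room state lock is held. The sketch below shows the same guard-as-proof pattern with a plain std::sync::Mutex and an in-memory map standing in for the database; RoomLockGuard, StateService and lock_room are invented for the example and are not conduwuit APIs.

    use std::collections::HashMap;
    use std::sync::{Mutex, MutexGuard};

    /// Token proving the caller holds the room state lock. In continuwuity this is
    /// the RoomMutexGuard; here it is a plain MutexGuard over a unit value.
    type RoomLockGuard<'a> = MutexGuard<'a, ()>;

    struct StateService {
        room_lock: Mutex<()>,
        forward_extremities: HashMap<String, Vec<String>>, // room_id -> extremity event IDs
    }

    impl StateService {
        fn lock_room(&self) -> RoomLockGuard<'_> {
            self.room_lock.lock().expect("room lock poisoned")
        }

        /// The guard is never used; it only has to exist, which forces callers to
        /// take the lock before reading the extremity set.
        fn get_forward_extremities(
            &self,
            room_id: &str,
            _state_lock: &RoomLockGuard<'_>,
        ) -> Vec<String> {
            self.forward_extremities.get(room_id).cloned().unwrap_or_default()
        }
    }

    fn main() {
        let mut extremities = HashMap::new();
        extremities.insert("!room:example.org".to_owned(), vec!["$tip".to_owned()]);
        let svc = StateService { room_lock: Mutex::new(()), forward_extremities: extremities };

        let guard = svc.lock_room();
        println!("{:?}", svc.get_forward_extremities("!room:example.org", &guard));
        drop(guard); // lock released only after the read completes
    }

Because the guard parameter is borrowed, any call site that has already dropped the lock fails to compile, which is the property the _state_lock parameter is there to enforce for extremity reads during timeline appends.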