From 6e7390a9b7138ff91b76be17249c1a67307bef70 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 00:45:57 +0100 Subject: [PATCH 01/11] Unsafe, untested, and potentially overeager PDU sanity checks --- src/core/matrix/state_res/event_auth.rs | 8 +- src/service/rooms/timeline/mod.rs | 1120 ++++++++++++++++++++++- 2 files changed, 1098 insertions(+), 30 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index bff3d4c8..3415c5dc 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -219,7 +219,7 @@ where /* // TODO: In the past this code was commented as it caused problems with Synapse. This is no // longer the case. This needs to be implemented. - // See also: https://github.com/ruma/ruma/pull/2064 + // See also: https://github.com/ruma/ruma/pull/2064 // // 2. Reject if auth_events // a. auth_events cannot have duplicate keys since it's a BTree @@ -264,9 +264,11 @@ where if room_id_server_name != room_create_event.sender().server_name() { warn!( - "servername of room ID origin ({}) does not match servername of m.room.create sender ({})", + "servername of room ID origin ({}) does not match servername of m.room.create \ + sender ({})", room_id_server_name, - room_create_event.sender().server_name()); + room_create_event.sender().server_name() + ); return Ok(false); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 70c98a09..02efbfe0 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,34 +1,61 @@ -mod append; -mod backfill; -mod build; -mod create; mod data; -mod redact; -use std::{fmt::Write, sync::Arc}; +use std::{ + borrow::Borrow, + cmp, + collections::{BTreeMap, HashSet}, + fmt::Write, + iter::once, + sync::Arc, +}; use async_trait::async_trait; -pub use conduwuit_core::matrix::pdu::{PduId, RawPduId}; -use conduwuit_core::{ - Result, Server, at, err, +pub use conduwuit::matrix::pdu::{PduId, RawPduId}; +use conduwuit::{ + Err, Error, Result, Server, at, debug, debug_warn, err, error, implement, info, matrix::{ - event::Event, - pdu::{PduCount, PduEvent}, + Event, + pdu::{EventHash, PduBuilder, PduCount, PduEvent, gen_event_id}, + state_res::{self, RoomVersion}, }, - utils::{MutexMap, MutexMapGuard, future::TryExtExt, stream::TryIgnore}, - warn, + utils::{ + self, IterStream, MutexMap, MutexMapGuard, ReadyExt, future::TryExtExt, stream::TryIgnore, + }, + validated, warn, +}; +use futures::{ + Future, FutureExt, Stream, StreamExt, TryStreamExt, future, future::ready, pin_mut, }; -use futures::{Future, Stream, TryStreamExt, pin_mut}; use ruma::{ - CanonicalJsonObject, EventId, OwnedEventId, OwnedRoomId, RoomId, UserId, - events::room::encrypted::Relation, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, ServerName, UserId, + api::federation, + canonical_json::to_canonical_value, + events::{ + GlobalAccountDataEventType, StateEventType, TimelineEventType, + push_rules::PushRulesEvent, + room::{ + create::RoomCreateEventContent, + encrypted::Relation, + member::{MembershipState, RoomMemberEventContent}, + power_levels::RoomPowerLevelsEventContent, + redaction::RoomRedactionEventContent, + }, + }, + push::{Action, Ruleset, Tweak}, + uint, }; use serde::Deserialize; +use serde_json::value::{RawValue as RawJsonValue, to_raw_value}; use self::data::Data; pub use self::data::PdusIterItem; use crate::{ - Dep, account_data, admin, 
appservice, globals, pusher, rooms, sending, server_keys, users, + Dep, account_data, admin, appservice, + appservice::NamespaceRegex, + globals, pusher, rooms, + rooms::{short::ShortRoomId, state_compressor::CompressedState}, + sending, server_keys, users, }; // Update Relationships @@ -132,12 +159,12 @@ impl crate::Service for Service { impl Service { #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { + pub async fn first_pdu_in_room(&self, room_id: &RoomId) -> Result { self.first_item_in_room(room_id).await.map(at!(1)) } #[tracing::instrument(skip(self), level = "debug")] - pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, impl Event)> { + pub async fn first_item_in_room(&self, room_id: &RoomId) -> Result<(PduCount, PduEvent)> { let pdus = self.pdus(None, room_id, None); pin_mut!(pdus); @@ -147,7 +174,7 @@ impl Service { } #[tracing::instrument(skip(self), level = "debug")] - pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { + pub async fn latest_pdu_in_room(&self, room_id: &RoomId) -> Result { self.db.latest_pdu_in_room(None, room_id).await } @@ -189,14 +216,13 @@ impl Service { /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. #[inline] - pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { + pub async fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result { self.db.get_non_outlier_pdu(event_id).await } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[inline] pub async fn get_pdu(&self, event_id: &EventId) -> Result { self.db.get_pdu(event_id).await } @@ -204,13 +230,11 @@ impl Service { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - #[inline] pub async fn get_pdu_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_from_id(pdu_id).await } /// Returns the pdu as a `BTreeMap`. - #[inline] pub async fn get_pdu_json_from_id(&self, pdu_id: &RawPduId) -> Result { self.db.get_pdu_json_from_id(pdu_id).await } @@ -218,7 +242,6 @@ impl Service { /// Checks if pdu exists /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[inline] pub fn pdu_exists<'a>( &'a self, event_id: &'a EventId, @@ -228,8 +251,772 @@ impl Service { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self), level = "debug")] - pub async fn replace_pdu(&self, pdu_id: &RawPduId, pdu_json: &CanonicalJsonObject) -> Result { - self.db.replace_pdu(pdu_id, pdu_json).await + pub async fn replace_pdu( + &self, + pdu_id: &RawPduId, + pdu_json: &CanonicalJsonObject, + pdu: &PduEvent, + ) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu_json, pdu).await + } + + /// Creates a new persisted data unit and adds it to a room. + /// + /// By this point the incoming event should be fully authenticated, no auth + /// happens in `append_pdu`. + /// + /// Returns pdu id + #[tracing::instrument(level = "debug", skip_all)] + pub async fn append_pdu<'a, Leaves>( + &'a self, + pdu: &'a PduEvent, + mut pdu_json: CanonicalJsonObject, + leaves: Leaves, + state_lock: &'a RoomMutexGuard, + ) -> Result + where + Leaves: Iterator + Send + 'a, + { + // Coalesce database writes for the remainder of this scope. + let _cork = self.db.db.cork_and_flush(); + + let shortroomid = self + .services + .short + .get_shortroomid(&pdu.room_id) + .await + .map_err(|_| err!(Database("Room does not exist")))?; + + // Make unsigned fields correct. 
This is not properly documented in the spec, + // but state events need to have previous content in the unsigned field, so + // clients can easily interpret things like membership changes + if let Some(state_key) = &pdu.state_key { + if let CanonicalJsonValue::Object(unsigned) = pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(BTreeMap::default())) + { + if let Ok(shortstatehash) = self + .services + .state_accessor + .pdu_shortstatehash(&pdu.event_id) + .await + { + if let Ok(prev_state) = self + .services + .state_accessor + .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) + .await + { + unsigned.insert( + "prev_content".to_owned(), + CanonicalJsonValue::Object( + utils::to_canonical_object(prev_state.content.clone()).map_err( + |e| { + error!( + "Failed to convert prev_state to canonical JSON: {e}" + ); + Error::bad_database( + "Failed to convert prev_state to canonical JSON.", + ) + }, + )?, + ), + ); + unsigned.insert( + String::from("prev_sender"), + CanonicalJsonValue::String(prev_state.sender.to_string()), + ); + unsigned.insert( + String::from("replaces_state"), + CanonicalJsonValue::String(prev_state.event_id.to_string()), + ); + } + } + } else { + error!("Invalid unsigned type in pdu."); + } + } + + // We must keep track of all events that have been referenced. + self.services + .pdu_metadata + .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); + + self.services + .state + .set_forward_extremities(&pdu.room_id, leaves, state_lock) + .await; + + let insert_lock = self.mutex_insert.lock(&pdu.room_id).await; + + let count1 = self.services.globals.next_count().unwrap(); + // Mark as read first so the sending client doesn't get a notification even if + // appending fails + self.services + .read_receipt + .private_read_set(&pdu.room_id, &pdu.sender, count1); + self.services + .user + .reset_notification_counts(&pdu.sender, &pdu.room_id); + + let count2 = PduCount::Normal(self.services.globals.next_count().unwrap()); + let pdu_id: RawPduId = PduId { shortroomid, shorteventid: count2 }.into(); + + // Insert pdu + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2).await; + + drop(insert_lock); + + // See if the event matches any known pushers via power level + let power_levels: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(&pdu.room_id, &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let sync_pdu = pdu.to_sync_room_event(); + + let mut push_target: HashSet<_> = self + .services + .state_cache + .active_local_users_in_room(&pdu.room_id) + .map(ToOwned::to_owned) + // Don't notify the sender of their own events, and dont send from ignored users + .ready_filter(|user| *user != pdu.sender) + .filter_map(|recipient_user| async move { (!self.services.users.user_is_ignored(&pdu.sender, &recipient_user).await).then_some(recipient_user) }) + .collect() + .await; + + let mut notifies = Vec::with_capacity(push_target.len().saturating_add(1)); + let mut highlights = Vec::with_capacity(push_target.len().saturating_add(1)); + + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key) = &pdu.state_key { + let target_user_id = UserId::parse(state_key)?; + + if self.services.users.is_active_local(target_user_id).await { + push_target.insert(target_user_id.to_owned()); + } + } + } + + for user in &push_target { + let rules_for_user = self + .services + .account_data + .get_global(user, GlobalAccountDataEventType::PushRules) + .await + 
.map_or_else( + |_| Ruleset::server_default(user), + |ev: PushRulesEvent| ev.content.global, + ); + + let mut highlight = false; + let mut notify = false; + + for action in self + .services + .pusher + .get_actions(user, &rules_for_user, &power_levels, &sync_pdu, &pdu.room_id) + .await + { + match action { + | Action::Notify => notify = true, + | Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + }, + | _ => {}, + } + + // Break early if both conditions are true + if notify && highlight { + break; + } + } + + if notify { + notifies.push(user.clone()); + } + + if highlight { + highlights.push(user.clone()); + } + + self.services + .pusher + .get_pushkeys(user) + .ready_for_each(|push_key| { + self.services + .sending + .send_pdu_push(&pdu_id, user, push_key.to_owned()) + .expect("TODO: replace with future"); + }) + .await; + } + + self.db + .increment_notification_counts(&pdu.room_id, notifies, highlights); + + match pdu.kind { + | TimelineEventType::RoomRedaction => { + use RoomVersionId::*; + + let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; + match room_version_id { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = &pdu.redacts { + if self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? + { + self.redact_pdu(redact_id, pdu, shortroomid).await?; + } + } + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? + { + self.redact_pdu(redact_id, pdu, shortroomid).await?; + } + } + }, + } + }, + | TimelineEventType::SpaceChild => + if let Some(_state_key) = &pdu.state_key { + self.services + .spaces + .roomid_spacehierarchy_cache + .lock() + .await + .remove(&pdu.room_id); + }, + | TimelineEventType::RoomMember => { + if let Some(state_key) = &pdu.state_key { + // if the state_key fails + let target_user_id = UserId::parse(state_key) + .expect("This state_key was previously validated"); + + let content: RoomMemberEventContent = pdu.get_content()?; + let stripped_state = match content.membership { + | MembershipState::Invite | MembershipState::Knock => + self.services.state.summary_stripped(pdu).await.into(), + | _ => None, + }; + + // Update our membership info, we do this here incase a user is invited or + // knocked and immediately leaves we need the DB to record the invite or + // knock event for auth + self.services + .state_cache + .update_membership( + &pdu.room_id, + target_user_id, + content, + &pdu.sender, + stripped_state, + None, + true, + ) + .await?; + } + }, + | TimelineEventType::RoomMessage => { + let content: ExtractBody = pdu.get_content()?; + if let Some(body) = content.body { + self.services.search.index_pdu(shortroomid, &pdu_id, &body); + + if self.services.admin.is_admin_command(pdu, &body).await { + self.services.admin.command_with_sender( + body, + Some((*pdu.event_id).into()), + pdu.sender.clone().into(), + )?; + } + } + }, + | _ => {}, + } + + if let Ok(content) = pdu.get_content::() { + if let Ok(related_pducount) = self.get_pdu_count(&content.relates_to.event_id).await { + self.services + .pdu_metadata + .add_relation(count2, related_pducount); + } + } + + if let Ok(content) = pdu.get_content::() { + match content.relates_to { + | Relation::Reply { in_reply_to } => { + // We need to do it again here, because replies don't have + // event_id as a top 
level field + if let Ok(related_pducount) = self.get_pdu_count(&in_reply_to.event_id).await + { + self.services + .pdu_metadata + .add_relation(count2, related_pducount); + } + }, + | Relation::Thread(thread) => { + self.services + .threads + .add_to_thread(&thread.event_id, pdu) + .await?; + }, + | _ => {}, // TODO: Aggregate other types + } + } + + for appservice in self.services.appservice.read().await.values() { + if self + .services + .state_cache + .appservice_in_room(&pdu.room_id, appservice) + .await + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + continue; + } + + // If the RoomMember event has a non-empty state_key, it is targeted at someone. + // If it is our appservice user, we send this PDU to it. + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + let appservice_uid = appservice.registration.sender_localpart.as_str(); + if state_key_uid == &appservice_uid { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + continue; + } + } + } + + let matching_users = |users: &NamespaceRegex| { + appservice.users.is_match(pdu.sender.as_str()) + || pdu.kind == TimelineEventType::RoomMember + && pdu + .state_key + .as_ref() + .is_some_and(|state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: NamespaceRegex| { + self.services + .alias + .local_aliases_for_room(&pdu.room_id) + .ready_any(move |room_alias| aliases.is_match(room_alias.as_str())) + }; + + if matching_aliases(appservice.aliases.clone()).await + || appservice.rooms.is_match(pdu.room_id.as_str()) + || matching_users(&appservice.users) + { + self.services + .sending + .send_pdu_appservice(appservice.registration.id.clone(), pdu_id)?; + } + } + + Ok(pdu_id) + } + + pub async fn create_hash_and_sign_event( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + _mutex_lock: &RoomMutexGuard, /* Take mutex guard to make sure users get the room + * state mutex */ + ) -> Result<(PduEvent, CanonicalJsonObject)> { + let PduBuilder { + event_type, + content, + unsigned, + state_key, + redacts, + timestamp, + } = pdu_builder; + + let prev_events: Vec = self + .services + .state + .get_forward_extremities(room_id) + .take(20) + .map(Into::into) + .collect() + .await; + + // If there was no create event yet, assume we are creating a room + let room_version_id = self + .services + .state + .get_room_version(room_id) + .await + .or_else(|_| { + if event_type == TimelineEventType::RoomCreate { + let content: RoomCreateEventContent = serde_json::from_str(content.get())?; + Ok(content.room_version) + } else { + Err(Error::InconsistentRoomState( + "non-create event for room of unknown version", + room_id.to_owned(), + )) + } + })?; + + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + let auth_events = self + .services + .state + .get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content) + .await?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .stream() + .map(Ok) + .and_then(|event_id| self.get_pdu(event_id)) + .and_then(|pdu| future::ok(pdu.depth)) + .ignore_err() + .ready_fold(uint!(0), cmp::max) + .await + .saturating_add(uint!(1)); + + if state_key.is_none() { + if prev_events.is_empty() { + warn!("Timeline event had zero prev_events, something broke."); + return 
Err!(Request(Unknown("Timeline event had zero prev_events."))); + } + if depth.le(&uint!(2)) { + warn!( + "Had unsafe depth of {depth} in {room_id} when creating non-state event. \ + Bad!" + ); + return Err!(Request(Unknown("Unsafe depth for non-state event."))); + } + }; + + let mut unsigned = unsigned.unwrap_or_default(); + + if let Some(state_key) = &state_key { + if let Ok(prev_pdu) = self + .services + .state_accessor + .room_state_get(room_id, &event_type.to_string().into(), state_key) + .await + { + unsigned.insert("prev_content".to_owned(), prev_pdu.get_content_as_value()); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(&prev_pdu.sender) + .expect("UserId::to_value always works"), + ); + unsigned.insert( + "replaces_state".to_owned(), + serde_json::to_value(&prev_pdu.event_id).expect("EventId is valid json"), + ); + } + } + if event_type != TimelineEventType::RoomCreate && prev_events.is_empty() { + return Err!(Request(Unknown("Event incorrectly had zero prev_events."))); + } + if state_key.is_none() && depth.lt(&uint!(2)) { + // The first two events in a room are always m.room.create and m.room.member, + // so any other events with that same depth are illegal. + warn!( + "Had unsafe depth {depth} when creating non-state event in {room_id}. Cowardly \ + aborting" + ); + return Err!(Request(Unknown("Unsafe depth for non-state event."))); + } + + let mut pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: room_id.to_owned(), + sender: sender.to_owned(), + origin: None, + origin_server_ts: timestamp.map_or_else( + || { + utils::millis_since_unix_epoch() + .try_into() + .expect("u64 fits into UInt") + }, + |ts| ts.get(), + ), + kind: event_type, + content, + state_key, + prev_events, + depth, + auth_events: auth_events + .values() + .map(|pdu| pdu.event_id.clone()) + .collect(), + redacts, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { sha256: "aaa".to_owned() }, + signatures: None, + }; + + let auth_fetch = |k: &StateEventType, s: &str| { + let key = (k.clone(), s.into()); + ready(auth_events.get(&key)) + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None, // TODO: third_party_invite + auth_fetch, + ) + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; + + if !auth_check { + return Err!(Request(Forbidden("Event is not authorized."))); + } + + // Hash and sign + let mut pdu_json = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Request(BadJson(warn!("Failed to convert PDU to canonical JSON: {e}")))) + })?; + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + pdu_json.remove("event_id"); + }, + } + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(self.services.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + if let Err(e) = self + .services + .server_keys + .hash_and_sign_event(&mut pdu_json, &room_version_id) + { + return match e { + | Error::Signatures(ruma::signatures::Error::PduSize) => { + Err!(Request(TooLarge("Message/PDU is too long (exceeds 65535 bytes)"))) + }, + | _ => Err!(Request(Unknown(warn!("Signing event failed: {e}")))), + }; + } + + // Generate event id + pdu.event_id = gen_event_id(&pdu_json, 
&room_version_id)?; + + pdu_json + .insert("event_id".into(), CanonicalJsonValue::String(pdu.event_id.clone().into())); + + // Generate short event id + let _shorteventid = self + .services + .short + .get_or_create_shorteventid(&pdu.event_id) + .await; + + Ok((pdu, pdu_json)) + } + + /// Creates a new persisted data unit and adds it to a room. This function + /// takes a roomid_mutex_state, meaning that only this function is able to + /// mutate the room state. + #[tracing::instrument(skip(self, state_lock), level = "debug")] + pub async fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + state_lock: &RoomMutexGuard, + ) -> Result { + let (pdu, pdu_json) = self + .create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock) + .await?; + + if self.services.admin.is_admin_room(&pdu.room_id).await { + self.check_pdu_for_admin_room(&pdu, sender).boxed().await?; + } + + // If redaction event is not authorized, do not append it to the timeline + if pdu.kind == TimelineEventType::RoomRedaction { + use RoomVersionId::*; + match self.services.state.get_room_version(&pdu.room_id).await? { + | V1 | V2 | V3 | V4 | V5 | V6 | V7 | V8 | V9 | V10 => { + if let Some(redact_id) = &pdu.redacts { + if !self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? + { + return Err!(Request(Forbidden("User cannot redact this event."))); + } + } + }, + | _ => { + let content: RoomRedactionEventContent = pdu.get_content()?; + if let Some(redact_id) = &content.redacts { + if !self + .services + .state_accessor + .user_can_redact(redact_id, &pdu.sender, &pdu.room_id, false) + .await? + { + return Err!(Request(Forbidden("User cannot redact this event."))); + } + } + }, + } + } + + if pdu.kind == TimelineEventType::RoomMember { + let content: RoomMemberEventContent = pdu.get_content()?; + + if content.join_authorized_via_users_server.is_some() + && content.membership != MembershipState::Join + { + return Err!(Request(BadJson( + "join_authorised_via_users_server is only for member joins" + ))); + } + + if content + .join_authorized_via_users_server + .as_ref() + .is_some_and(|authorising_user| { + !self.services.globals.user_is_local(authorising_user) + }) { + return Err!(Request(InvalidParam( + "Authorising user does not belong to this homeserver" + ))); + } + } + + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. 
+ let statehashid = self.services.state.append_to_state(&pdu).await?; + + let pdu_id = self + .append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + once(pdu.event_id.borrow()), + state_lock, + ) + .boxed() + .await?; + + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + self.services + .state + .set_room_state(&pdu.room_id, statehashid, state_lock); + + let mut servers: HashSet = self + .services + .state_cache + .room_servers(&pdu.room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + // In case we are kicking or banning a user, we need to inform their server of + // the change + if pdu.kind == TimelineEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(state_key_uid.server_name().to_owned()); + } + } + + // Remove our server from the server list since it will be added to it by + // room_servers() and/or the if statement above + servers.remove(self.services.globals.server_name()); + + self.services + .sending + .send_pdu_servers(servers.iter().map(AsRef::as_ref).stream(), &pdu_id) + .await?; + + Ok(pdu.event_id) + } + + /// Append the incoming event setting the state snapshot to the state from + /// the server that sent the event. + #[tracing::instrument(level = "debug", skip_all)] + pub async fn append_incoming_pdu<'a, Leaves>( + &'a self, + pdu: &'a PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: Leaves, + state_ids_compressed: Arc, + soft_fail: bool, + state_lock: &'a RoomMutexGuard, + ) -> Result> + where + Leaves: Iterator + Send + 'a, + { + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. + self.services + .state + .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed) + .await?; + + if soft_fail { + self.services + .pdu_metadata + .mark_as_referenced(&pdu.room_id, pdu.prev_events.iter().map(AsRef::as_ref)); + + self.services + .state + .set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock) + .await; + + return Ok(None); + } + + let pdu_id = self + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock) + .await?; + + Ok(Some(pdu_id)) } /// Returns an iterator over all PDUs in a room. Unknown rooms produce no @@ -266,4 +1053,283 @@ impl Service { self.db .pdus(user_id, room_id, from.unwrap_or_else(PduCount::min)) } + + /// Replace a PDU with the redacted form. 
+ #[tracing::instrument(name = "redact", level = "debug", skip(self))] + pub async fn redact_pdu( + &self, + event_id: &EventId, + reason: &PduEvent, + shortroomid: ShortRoomId, + ) -> Result { + // TODO: Don't reserialize, keep original json + let Ok(pdu_id) = self.get_pdu_id(event_id).await else { + // If event does not exist, just noop + return Ok(()); + }; + + let mut pdu = self.get_pdu_from_id(&pdu_id).await.map_err(|e| { + err!(Database(error!(?pdu_id, ?event_id, ?e, "PDU ID points to invalid PDU."))) + })?; + + if let Ok(content) = pdu.get_content::() { + if let Some(body) = content.body { + self.services + .search + .deindex_pdu(shortroomid, &pdu_id, &body); + } + } + + let room_version_id = self.services.state.get_room_version(&pdu.room_id).await?; + + pdu.redact(&room_version_id, reason)?; + + let obj = utils::to_canonical_object(&pdu).map_err(|e| { + err!(Database(error!(?event_id, ?e, "Failed to convert PDU to canonical JSON"))) + })?; + + self.replace_pdu(&pdu_id, &obj, &pdu).await + } + + #[tracing::instrument(name = "backfill", level = "debug", skip(self))] + pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> { + if self + .services + .state_cache + .room_joined_count(room_id) + .await + .is_ok_and(|count| count <= 1) + && !self + .services + .state_accessor + .is_world_readable(room_id) + .await + { + // Room is empty (1 user or none), there is no one that can backfill + return Ok(()); + } + + let first_pdu = self + .first_item_in_room(room_id) + .await + .expect("Room is not empty"); + + if first_pdu.0 < from { + // No backfill required, there are still events between them + return Ok(()); + } + + let power_levels: RoomPowerLevelsEventContent = self + .services + .state_accessor + .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "") + .await + .unwrap_or_default(); + + let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| { + if level > &power_levels.users_default + && !self.services.globals.user_is_local(user_id) + { + Some(user_id.server_name()) + } else { + None + } + }); + + let canonical_room_alias_server = once( + self.services + .state_accessor + .get_canonical_alias(room_id) + .await, + ) + .filter_map(Result::ok) + .map(|alias| alias.server_name().to_owned()) + .stream(); + + let mut servers = room_mods + .stream() + .map(ToOwned::to_owned) + .chain(canonical_room_alias_server) + .chain( + self.services + .server + .config + .trusted_servers + .iter() + .map(ToOwned::to_owned) + .stream(), + ) + .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name)) + .filter_map(|server_name| async move { + self.services + .state_cache + .server_in_room(&server_name, room_id) + .await + .then_some(server_name) + }) + .boxed(); + + while let Some(ref backfill_server) = servers.next().await { + info!("Asking {backfill_server} for backfill"); + let response = self + .services + .sending + .send_federation_request( + backfill_server, + federation::backfill::get_backfill::v1::Request { + room_id: room_id.to_owned(), + v: vec![first_pdu.1.event_id.clone()], + limit: uint!(100), + }, + ) + .await; + match response { + | Ok(response) => { + for pdu in response.pdus { + if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await { + debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}"); + } + } + return Ok(()); + }, + | Err(e) => { + warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}"); + }, + } + } + + info!("No servers could backfill, but 
backfill was needed in room {room_id}"); + Ok(()) + } + + #[tracing::instrument(skip(self, pdu), level = "debug")] + pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box) -> Result<()> { + let (room_id, event_id, value) = + self.services.event_handler.parse_incoming_pdu(&pdu).await?; + + // Lock so we cannot backfill the same pdu twice at the same time + let mutex_lock = self + .services + .event_handler + .mutex_federation + .lock(&room_id) + .await; + + // Skip the PDU if we already have it as a timeline event + if let Ok(pdu_id) = self.get_pdu_id(&event_id).await { + debug!("We already know {event_id} at {pdu_id:?}"); + return Ok(()); + } + + self.services + .event_handler + .handle_incoming_pdu(origin, &room_id, &event_id, value, false) + .boxed() + .await?; + + let value = self.get_pdu_json(&event_id).await?; + + let pdu = self.get_pdu(&event_id).await?; + + let shortroomid = self.services.short.get_shortroomid(&room_id).await?; + + let insert_lock = self.mutex_insert.lock(&room_id).await; + + let count: i64 = self.services.globals.next_count().unwrap().try_into()?; + + let pdu_id: RawPduId = PduId { + shortroomid, + shorteventid: PduCount::Backfilled(validated!(0 - count)), + } + .into(); + + // Insert pdu + self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value); + + drop(insert_lock); + + if pdu.kind == TimelineEventType::RoomMessage { + let content: ExtractBody = pdu.get_content()?; + if let Some(body) = content.body { + self.services.search.index_pdu(shortroomid, &pdu_id, &body); + } + } + drop(mutex_lock); + + debug!("Prepended backfill pdu"); + Ok(()) + } +} + +#[implement(Service)] +#[tracing::instrument(skip_all, level = "debug")] +async fn check_pdu_for_admin_room(&self, pdu: &PduEvent, sender: &UserId) -> Result<()> { + match &pdu.kind { + | TimelineEventType::RoomEncryption => { + return Err!(Request(Forbidden(error!("Encryption not supported in admins room.")))); + }, + | TimelineEventType::RoomMember => { + let target = pdu + .state_key() + .filter(|v| v.starts_with('@')) + .unwrap_or(sender.as_str()); + + let server_user = &self.services.globals.server_user.to_string(); + + let content: RoomMemberEventContent = pdu.get_content()?; + match content.membership { + | MembershipState::Leave => { + if target == server_user { + return Err!(Request(Forbidden(error!( + "Server user cannot leave the admins room." + )))); + } + + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!( + "Last admin cannot leave the admins room." + )))); + } + }, + + | MembershipState::Ban if pdu.state_key().is_some() => { + if target == server_user { + return Err!(Request(Forbidden(error!( + "Server cannot be banned from admins room." + )))); + } + + let count = self + .services + .state_cache + .room_members(&pdu.room_id) + .ready_filter(|user| self.services.globals.user_is_local(user)) + .ready_filter(|user| *user != target) + .boxed() + .count() + .await; + + if count < 2 { + return Err!(Request(Forbidden(error!( + "Last admin cannot be banned from admins room." 
+ )))); + } + }, + | _ => {}, + } + }, + | _ => {}, + } + + Ok(()) } From feeb418e81acca411e77951ad93123fa347f918f Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 01:27:25 +0100 Subject: [PATCH 02/11] more logs --- src/core/matrix/state_res/event_auth.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 3415c5dc..830b5460 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -13,6 +13,7 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, third_party_invite::RoomThirdPartyInviteEventContent, }, + EventId, int, serde::{Base64, Raw}, }; @@ -21,7 +22,6 @@ use serde::{ de::{Error as _, IgnoredAny}, }; use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue}; - use super::{ Error, Event, Result, StateEventType, StateKey, TimelineEventType, power_levels::{ @@ -251,7 +251,14 @@ where let room_create_event = match room_create_event { | None => { - error!("no m.room.create event found for {}!", incoming_event.event_id()); + error!( + create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(), + "no m.room.create event found for {} ({})!", + incoming_event.event_id().as_str(), + incoming_event.room_id().as_str() + ); return Ok(false); }, | Some(e) => e, From 17fb69e3c2281c97ba6c6cc206a02150a39741b1 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 01:42:19 +0100 Subject: [PATCH 03/11] log which room struggled to get mainline depth --- src/core/matrix/state_res/mod.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index e721e14c..ba9c013d 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -733,8 +733,12 @@ where Fut: Future> + Send, E: Event + Send + Sync, { + let mut room_id = None; while let Some(sort_ev) = event { debug!(event_id = sort_ev.event_id().as_str(), "mainline"); + if room_id.is_none() { + room_id = Some(sort_ev.room_id().to_owned()); + } let id = sort_ev.event_id(); if let Some(depth) = mainline_map.get(id) { @@ -753,7 +757,7 @@ where } } } - warn!("could not find a power event in the mainline map, defaulting to zero depth"); + warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth"); Ok(0) } From 864704a536be702dba4e4558e99c114455a8ffa7 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Wed, 11 Jun 2025 19:53:46 +0100 Subject: [PATCH 04/11] When in doubt, log all the things --- src/api/client/membership.rs | 2765 +++++++++++++++++ src/core/matrix/state_res/mod.rs | 2 +- .../rooms/event_handler/handle_outlier_pdu.rs | 7 +- .../event_handler/upgrade_outlier_pdu.rs | 72 +- src/service/rooms/timeline/mod.rs | 1 + 5 files changed, 2803 insertions(+), 44 deletions(-) create mode 100644 src/api/client/membership.rs diff --git a/src/api/client/membership.rs b/src/api/client/membership.rs new file mode 100644 index 00000000..28768fee --- /dev/null +++ b/src/api/client/membership.rs @@ -0,0 +1,2765 @@ +use std::{ + borrow::Borrow, + collections::{HashMap, HashSet}, + iter::once, + net::IpAddr, + 
sync::Arc, +}; + +use axum::extract::State; +use axum_client_ip::InsecureClientIp; +use conduwuit::{ + Err, Result, at, debug, debug_error, debug_info, debug_warn, err, error, info, is_matching, + matrix::{ + StateKey, + pdu::{PduBuilder, PduEvent, gen_event_id, gen_event_id_canonical_json}, + state_res, + }, + result::{FlatOk, NotFound}, + trace, + utils::{ + self, FutureBoolExt, + future::ReadyEqExt, + shuffle, + stream::{BroadbandExt, IterStream, ReadyExt}, + }, + warn, +}; +use conduwuit_service::{ + Services, + appservice::RegistrationInfo, + rooms::{ + state::RoomMutexGuard, + state_compressor::{CompressedState, HashSetCompressStateEvent}, + }, +}; +use futures::{FutureExt, StreamExt, TryFutureExt, join, pin_mut}; +use ruma::{ + CanonicalJsonObject, CanonicalJsonValue, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, ServerName, UserId, + api::{ + client::{ + error::ErrorKind, + knock::knock_room, + membership::{ + ThirdPartySigned, ban_user, forget_room, + get_member_events::{self, v3::MembershipEventFilter}, + invite_user, join_room_by_id, join_room_by_id_or_alias, + joined_members::{self, v3::RoomMember}, + joined_rooms, kick_user, leave_room, unban_user, + }, + }, + federation::{self, membership::create_invite}, + }, + canonical_json::to_canonical_value, + events::{ + StateEventType, + room::{ + join_rules::{AllowRule, JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + }, +}; + +use crate::{Ruma, client::full_user_deactivate}; + +/// Checks if the room is banned in any way possible and the sender user is not +/// an admin. +/// +/// Performs automatic deactivation if `auto_deactivate_banned_room_attempts` is +/// enabled +#[tracing::instrument(skip(services))] +async fn banned_room_check( + services: &Services, + user_id: &UserId, + room_id: Option<&RoomId>, + server_name: Option<&ServerName>, + client_ip: IpAddr, +) -> Result { + if services.users.is_admin(user_id).await { + return Ok(()); + } + + if let Some(room_id) = room_id { + if services.rooms.metadata.is_banned(room_id).await + || services + .moderation + .is_remote_server_forbidden(room_id.server_name().expect("legacy room mxid")) + { + warn!( + "User {user_id} who is not an admin attempted to send an invite for or \ + attempted to join a banned room or banned room server name: {room_id}" + ); + + if services.server.config.auto_deactivate_banned_room_attempts { + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); + + if services.server.config.admin_room_notices { + services + .admin + .send_text(&format!( + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" + )) + .await; + } + + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms).await?; + } + + return Err!(Request(Forbidden("This room is banned on this homeserver."))); + } + } else if let Some(server_name) = server_name { + if services + .config + .forbidden_remote_server_names + .is_match(server_name.host()) + { + warn!( + "User {user_id} who is not an admin tried joining a room which has the server \ + name {server_name} that is globally forbidden. 
Rejecting.", + ); + + if services.server.config.auto_deactivate_banned_room_attempts { + warn!( + "Automatically deactivating user {user_id} due to attempted banned room join" + ); + + if services.server.config.admin_room_notices { + services + .admin + .send_text(&format!( + "Automatically deactivating user {user_id} due to attempted banned \ + room join from IP {client_ip}" + )) + .await; + } + + let all_joined_rooms: Vec = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(Into::into) + .collect() + .await; + + full_user_deactivate(services, user_id, &all_joined_rooms).await?; + } + + return Err!(Request(Forbidden("This remote server is banned on this homeserver."))); + } + } + + Ok(()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/join` +/// +/// Tries to join the sender user into a room. +/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: asks other servers over +/// federation +#[tracing::instrument(skip_all, fields(%client), name = "join")] +pub(crate) async fn join_room_by_id_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; + + // There is no body.server_name for /roomId/join + let mut servers: Vec<_> = services + .rooms + .state_cache + .servers_invite_via(&body.room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &body.room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = body.room_id.server_name() { + servers.push(server.into()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + join_room_by_id_helper( + &services, + sender_user, + &body.room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + &body.appservice_info, + ) + .boxed() + .await +} + +/// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` +/// +/// Tries to join the sender user into a room. +/// +/// - If the server knowns about this room: creates the join event and does auth +/// rules locally +/// - If the server does not know about the room: use the server name query +/// param if specified. if not specified, asks other servers over federation +/// via room alias server name and room ID server name +#[tracing::instrument(skip_all, fields(%client), name = "join")] +pub(crate) async fn join_room_by_id_or_alias_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let appservice_info = &body.appservice_info; + let body = body.body; + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + | Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + let join_room_response = join_room_by_id_helper( + &services, + sender_user, + &room_id, + body.reason.clone(), + &servers, + body.third_party_signed.as_ref(), + appservice_info, + ) + .boxed() + .await?; + + Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id }) +} + +/// # `POST /_matrix/client/*/knock/{roomIdOrAlias}` +/// +/// Tries to knock the room to ask permission to join for the sender user. +#[tracing::instrument(skip_all, fields(%client), name = "knock")] +pub(crate) async fn knock_room_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let body = &body.body; + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias.clone()) { + | Ok(room_id) => { + banned_room_check( + &services, + sender_user, + Some(&room_id), + room_id.server_name(), + client, + ) + .await?; + + let mut servers = body.via.clone(); + servers.extend( + services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned) + .collect::>() + .await, + ); + + servers.extend( + services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default() + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + if let Some(server) = room_id.server_name() { + servers.push(server.to_owned()); + } + + servers.sort_unstable(); + servers.dedup(); + shuffle(&mut servers); + + (servers, room_id) + }, + | Err(room_alias) => { + let (room_id, mut servers) = services + .rooms + .alias + .resolve_alias(&room_alias, Some(body.via.clone())) + .await?; + + banned_room_check( + &services, + sender_user, + Some(&room_id), + Some(room_alias.server_name()), + client, + ) + .await?; + + let addl_via_servers = services + .rooms + .state_cache + .servers_invite_via(&room_id) + .map(ToOwned::to_owned); + + let addl_state_servers = services + .rooms + .state_cache + .invite_state(sender_user, &room_id) + .await + .unwrap_or_default(); + + let mut addl_servers: Vec<_> = addl_state_servers + .iter() + .map(|event| event.get_field("sender")) + .filter_map(FlatOk::flat_ok) + .map(|user: &UserId| user.server_name().to_owned()) + .stream() + .chain(addl_via_servers) + .collect() + .await; + + addl_servers.sort_unstable(); + addl_servers.dedup(); + shuffle(&mut addl_servers); + servers.append(&mut addl_servers); + + (servers, room_id) + }, + }; + + knock_room_by_id_helper(&services, sender_user, &room_id, body.reason.clone(), &servers) + .boxed() + .await +} + +/// # `POST /_matrix/client/v3/rooms/{roomId}/leave` +/// +/// Tries to leave the sender user from a room. +/// +/// - This should always work if the user is currently joined. +pub(crate) async fn leave_room_route( + State(services): State, + body: Ruma, +) -> Result { + leave_room(&services, body.sender_user(), &body.room_id, body.reason.clone()) + .await + .map(|()| leave_room::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/invite` +/// +/// Tries to send an invite event into the room. +#[tracing::instrument(skip_all, fields(%client), name = "invite")] +pub(crate) async fn invite_user_route( + State(services): State, + InsecureClientIp(client): InsecureClientIp, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { + debug_error!( + "User {sender_user} is not an admin and attempted to send an invite to room {}", + &body.room_id + ); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); + } + + banned_room_check( + &services, + sender_user, + Some(&body.room_id), + body.room_id.server_name(), + client, + ) + .await?; + + match &body.recipient { + | invite_user::v3::InvitationRecipient::UserId { user_id } => { + let sender_ignored_recipient = services.users.user_is_ignored(sender_user, user_id); + let recipient_ignored_by_sender = + services.users.user_is_ignored(user_id, sender_user); + + let (sender_ignored_recipient, recipient_ignored_by_sender) = + join!(sender_ignored_recipient, recipient_ignored_by_sender); + + if sender_ignored_recipient { + return Ok(invite_user::v3::Response {}); + } + + if let Ok(target_user_membership) = services + .rooms + .state_accessor + .get_member(&body.room_id, user_id) + .await + { + if target_user_membership.membership == MembershipState::Ban { + return Err!(Request(Forbidden("User is banned from this room."))); + } + } + + if recipient_ignored_by_sender { + // silently drop the invite to the recipient if they've been ignored by the + // sender, pretend it worked + return Ok(invite_user::v3::Response {}); + } + + invite_helper( + &services, + sender_user, + user_id, + &body.room_id, + body.reason.clone(), + false, + ) + .boxed() + .await?; + + Ok(invite_user::v3::Response {}) + }, + | _ => { + Err!(Request(NotFound("User not found."))) + }, + } +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/kick` +/// +/// Tries to send a kick event into the room. +pub(crate) async fn kick_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let Ok(event) = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + else { + // copy synapse's behaviour of returning 200 without any change to the state + // instead of erroring on left users + return Ok(kick_user::v3::Response::new()); + }; + + if !matches!( + event.membership, + MembershipState::Invite | MembershipState::Knock | MembershipState::Join, + ) { + return Err!(Request(Forbidden( + "Cannot kick a user who is not apart of the room (current membership: {})", + event.membership + ))); + } + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + ..event + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(kick_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/ban` +/// +/// Tries to send a ban event into the room. +pub(crate) async fn ban_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + + if sender_user == body.user_id { + return Err!(Request(Forbidden("You cannot ban yourself."))); + } + + if services.users.is_suspended(sender_user).await? 
{ + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let current_member_content = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Ban)); + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Ban, + reason: body.reason.clone(), + displayname: None, // display name may be offensive + avatar_url: None, // avatar may be offensive + is_direct: None, + join_authorized_via_users_server: None, + third_party_invite: None, + redact_events: body.redact_events, + ..current_member_content + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(ban_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/unban` +/// +/// Tries to send an unban event into the room. +pub(crate) async fn unban_user_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + if services.users.is_suspended(sender_user).await? { + return Err!(Request(UserSuspended("You cannot perform this action while suspended."))); + } + let state_lock = services.rooms.state.mutex.lock(&body.room_id).await; + + let current_member_content = services + .rooms + .state_accessor + .get_member(&body.room_id, &body.user_id) + .await + .unwrap_or_else(|_| RoomMemberEventContent::new(MembershipState::Leave)); + + if current_member_content.membership != MembershipState::Ban { + return Err!(Request(Forbidden( + "Cannot unban a user who is not banned (current membership: {})", + current_member_content.membership + ))); + } + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(body.user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason: body.reason.clone(), + join_authorized_via_users_server: None, + third_party_invite: None, + is_direct: None, + ..current_member_content + }), + sender_user, + &body.room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(unban_user::v3::Response::new()) +} + +/// # `POST /_matrix/client/v3/rooms/{roomId}/forget` +/// +/// Forgets about a room. 
+/// +/// - If the sender user currently left the room: Stops sender user from +/// receiving information about the room +/// +/// Note: Other devices of the user have no way of knowing the room was +/// forgotten, so this has to be called from every device +pub(crate) async fn forget_room_route( + State(services): State, + body: Ruma, +) -> Result { + let user_id = body.sender_user(); + let room_id = &body.room_id; + + let joined = services.rooms.state_cache.is_joined(user_id, room_id); + let knocked = services.rooms.state_cache.is_knocked(user_id, room_id); + let invited = services.rooms.state_cache.is_invited(user_id, room_id); + + pin_mut!(joined, knocked, invited); + if joined.or(knocked).or(invited).await { + return Err!(Request(Unknown("You must leave the room before forgetting it"))); + } + + let membership = services + .rooms + .state_accessor + .get_member(room_id, user_id) + .await; + + if membership.is_not_found() { + return Err!(Request(Unknown("No membership event was found, room was never joined"))); + } + + let non_membership = membership + .map(|member| member.membership) + .is_ok_and(is_matching!(MembershipState::Leave | MembershipState::Ban)); + + if non_membership || services.rooms.state_cache.is_left(user_id, room_id).await { + services.rooms.state_cache.forget(room_id, user_id); + } + + Ok(forget_room::v3::Response::new()) +} + +/// # `POST /_matrix/client/r0/joined_rooms` +/// +/// Lists all rooms the user has joined. +pub(crate) async fn joined_rooms_route( + State(services): State, + body: Ruma, +) -> Result { + Ok(joined_rooms::v3::Response { + joined_rooms: services + .rooms + .state_cache + .rooms_joined(body.sender_user()) + .map(ToOwned::to_owned) + .collect() + .await, + }) +} + +fn membership_filter( + pdu: PduEvent, + for_membership: Option<&MembershipEventFilter>, + not_membership: Option<&MembershipEventFilter>, +) -> Option { + let membership_state_filter = match for_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(MembershipEventFilter::Leave) => MembershipState::Leave, + | Some(_) | None => MembershipState::Join, + }; + + let not_membership_state_filter = match not_membership { + | Some(MembershipEventFilter::Ban) => MembershipState::Ban, + | Some(MembershipEventFilter::Invite) => MembershipState::Invite, + | Some(MembershipEventFilter::Join) => MembershipState::Join, + | Some(MembershipEventFilter::Knock) => MembershipState::Knock, + | Some(_) | None => MembershipState::Leave, + }; + + let evt_membership = pdu.get_content::().ok()?.membership; + + if for_membership.is_some() && not_membership.is_some() { + if membership_state_filter != evt_membership + || not_membership_state_filter == evt_membership + { + None + } else { + Some(pdu) + } + } else if for_membership.is_some() && not_membership.is_none() { + if membership_state_filter != evt_membership { + None + } else { + Some(pdu) + } + } else if not_membership.is_some() && for_membership.is_none() { + if not_membership_state_filter == evt_membership { + None + } else { + Some(pdu) + } + } else { + Some(pdu) + } +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/members` +/// +/// Lists all joined users in a room (TODO: at a specific point in time, with a +/// specific membership). 
+/// +/// - Only works if the user is currently joined +pub(crate) async fn get_member_events_route( + State(services): State, + body: Ruma, +) -> Result { + let sender_user = body.sender_user(); + let membership = body.membership.as_ref(); + let not_membership = body.not_membership.as_ref(); + + if !services + .rooms + .state_accessor + .user_can_see_state_events(sender_user, &body.room_id) + .await + { + return Err!(Request(Forbidden("You don't have permission to view this room."))); + } + + Ok(get_member_events::v3::Response { + chunk: services + .rooms + .state_accessor + .room_state_full(&body.room_id) + .ready_filter_map(Result::ok) + .ready_filter(|((ty, _), _)| *ty == StateEventType::RoomMember) + .map(at!(1)) + .ready_filter_map(|pdu| membership_filter(pdu, membership, not_membership)) + .map(PduEvent::into_member_event) + .collect() + .await, + }) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` +/// +/// Lists all members of a room. +/// +/// - The sender user must be in the room +/// - TODO: An appservice just needs a puppet joined +pub(crate) async fn joined_members_route( + State(services): State, + body: Ruma, +) -> Result { + if !services + .rooms + .state_accessor + .user_can_see_state_events(body.sender_user(), &body.room_id) + .await + { + return Err!(Request(Forbidden("You don't have permission to view this room."))); + } + + Ok(joined_members::v3::Response { + joined: services + .rooms + .state_cache + .room_members(&body.room_id) + .map(ToOwned::to_owned) + .broad_then(|user_id| async move { + let member = RoomMember { + display_name: services.users.displayname(&user_id).await.ok(), + avatar_url: services.users.avatar_url(&user_id).await.ok(), + }; + + (user_id, member) + }) + .collect() + .await, + }) +} + +pub async fn join_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + third_party_signed: Option<&ThirdPartySigned>, + appservice_info: &Option, +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let user_is_guest = services + .users + .is_deactivated(sender_user) + .await + .unwrap_or(false) + && appservice_info.is_none(); + + if user_is_guest && !services.rooms.state_accessor.guest_can_join(room_id).await { + return Err!(Request(Forbidden("Guests are not allowed to join this room"))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id}"); + return Ok(join_room_by_id::v3::Response { room_id: room_id.into() }); + } + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + // Only check our known membership if we're already in the room. 
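+	// If we aren't in the room yet, any membership we have stored may be missing or stale,
+	// so fall back to a default `leave` below.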
+ // See: https://forgejo.ellis.link/continuwuation/continuwuity/issues/855 + let membership = if server_in_room { + services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + } else { + debug!("Ignoring local state for join {room_id}, we aren't in the room yet."); + Ok(RoomMemberEventContent::new(MembershipState::Leave)) + }; + if let Ok(m) = membership { + if m.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to join"); + // TODO: return reason + return Err!(Request(Forbidden("You are banned from the room."))); + } + } + + let local_join = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_join { + join_room_by_id_helper_local( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; + } else { + // Ask a remote server if we are not participating in this room + join_room_by_id_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + third_party_signed, + state_lock, + ) + .boxed() + .await?; + } + + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) +} + +#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_remote")] +async fn join_room_by_id_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, +) -> Result { + info!("Joining {room_id} over federation."); + + let (make_join_response, remote_server) = + make_join_request(services, sender_user, room_id, servers).await?; + + info!("make_join finished"); + + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + }; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse(warn!( + "Invalid make_join event json received from server: {e:?}" + ))) + })?; + + let join_authorized_via_users_server = { + use RoomVersionId::*; + if !matches!(room_version_id, V1 | V2 | V3 | V4 | V5 | V6 | V7) { + join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? 
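+					// NB: the wire key keeps the spec's British spelling (`authorised`),
+					// unlike the Rust field name used further down.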
+ .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()) + } else { + None + } + }; + + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + join_authorized_via_users_server: join_authorized_via_users_server.clone(), + ..RoomMemberEventContent::new(MembershipState::Join) + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + join_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; + + // Add event_id back + join_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let mut join_event = join_event_stub; + + info!("Asking {remote_server} for send_join in room {room_id}"); + let send_join_request = federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, + }; + + let send_join_response = match services + .sending + .send_synapse_request(&remote_server, send_join_request) + .await + { + | Ok(response) => response, + | Err(e) => { + error!("send_join failed: {e}"); + return Err(e); + }, + }; + + info!("send_join finished"); + + if join_authorized_via_users_server.is_some() { + if let Some(signed_raw) = &send_join_response.room_state.event { + debug_info!( + "There is a signed event with join_authorized_via_users_server. This room is \ + probably using restricted joins. 
Adding signature to our event" + ); + + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(signed_raw, &room_version_id).map_err(|e| { + err!(Request(BadJson(warn!( + "Could not convert event to canonical JSON: {e}" + )))) + })?; + + if signed_event_id != event_id { + return Err!(Request(BadJson(warn!( + %signed_event_id, %event_id, + "Server {remote_server} sent event with wrong event ID" + )))); + } + + match signed_value["signatures"] + .as_object() + .ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} sent invalid signatures type" + ))) + }) + .and_then(|e| { + e.get(remote_server.as_str()).ok_or_else(|| { + err!(BadServerResponse(warn!( + "Server {remote_server} did not send its signature for a restricted \ + room" + ))) + }) + }) { + | Ok(signature) => { + join_event + .get_mut("signatures") + .expect("we created a valid pdu") + .as_object_mut() + .expect("we created a valid pdu") + .insert(remote_server.to_string(), signature.clone()); + }, + | Err(e) => { + warn!( + "Server {remote_server} sent invalid signature in send_join signatures \ + for event {signed_value:?}: {e:?}", + ); + }, + } + } + } + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing join event"); + let parsed_join_pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid join event PDU: {e:?}")))?; + + info!("Acquiring server signing keys for response events"); + let resp_events = &send_join_response.room_state; + let resp_state = &resp_events.state; + let resp_auth = &resp_events.auth_chain; + services + .server_keys + .acquire_events_pubkeys(resp_auth.iter().chain(resp_state.iter())) + .await; + + info!("Going through send_join response room_state"); + let cork = services.db.cork_and_flush(); + let state = send_join_response + .room_state + .state + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, &room_version_id) + }) + .ready_filter_map(Result::ok) + .fold(HashMap::new(), |mut state, (event_id, value)| async move { + let pdu = match PduEvent::from_id_val(&event_id, value.clone()) { + | Ok(pdu) => pdu, + | Err(e) => { + debug_warn!("Invalid PDU in send_join response: {e:?}: {value:#?}"); + return state; + }, + }; + + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + if let Some(state_key) = &pdu.state_key { + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key) + .await; + + state.insert(shortstatekey, pdu.event_id.clone()); + } + + state + }) + .await; + + drop(cork); + + info!("Going through send_join response auth_chain"); + let cork = services.db.cork_and_flush(); + send_join_response + .room_state + .auth_chain + .iter() + .stream() + .then(|pdu| { + services + .server_keys + .validate_and_add_event_id_no_fetch(pdu, &room_version_id) + }) + .ready_filter_map(Result::ok) + .ready_for_each(|(event_id, value)| { + services.rooms.outlier.add_pdu_outlier(&event_id, &value); + }) + .await; + + drop(cork); + + debug!("Running send_join auth check"); + let fetch_state = &state; + let state_fetch = |k: StateEventType, s: StateKey| async move { + let shortstatekey = services.rooms.short.get_shortstatekey(&k, &s).await.ok()?; + + let event_id = fetch_state.get(&shortstatekey)?; + services.rooms.timeline.get_pdu(event_id).await.ok() + }; + + debug!("running stateres check on send_join parsed PDU"); + let auth_check = state_res::event_auth::auth_check( + 
&state_res::RoomVersion::new(&room_version_id)?, + &parsed_join_pdu, + None, // TODO: third party invite + |k, s| state_fetch(k.clone(), s.into()), + ) + .await + .map_err(|e| err!(Request(Forbidden(warn!("Auth check failed: {e:?}")))))?; + + if !auth_check { + return Err!(Request(Forbidden("Auth check failed"))); + } + + info!("Compressing state from send_join"); + let compressed: CompressedState = services + .rooms + .state_compressor + .compress_state_events(state.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_join, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_join, added, removed, &state_lock) + .await?; + + info!("Updating joined counts for new room"); + services + .rooms + .state_cache + .update_joined_count(room_id) + .await; + + // We append to state before appending the pdu, so we don't have a moment in + // time with the pdu without it's state. This is okay because append_pdu can't + // fail. + let statehash_after_join = services + .rooms + .state + .append_to_state(&parsed_join_pdu) + .await?; + + info!("Appending new room join event"); + services + .rooms + .timeline + .append_pdu( + &parsed_join_pdu, + join_event, + once(parsed_join_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_join, &state_lock); + + Ok(()) +} + +#[tracing::instrument(skip_all, fields(%sender_user, %room_id), name = "join_local")] +async fn join_room_by_id_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + _third_party_signed: Option<&ThirdPartySigned>, + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can join locally"); + + let join_rules_event_content = services + .rooms + .state_accessor + .room_state_get_content::( + room_id, + &StateEventType::RoomJoinRules, + "", + ) + .await; + + let restriction_rooms = match join_rules_event_content { + | Ok(RoomJoinRulesEventContent { + join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted), + }) => restricted + .allow + .into_iter() + .filter_map(|a| match a { + | AllowRule::RoomMembership(r) => Some(r.room_id), + | _ => None, + }) + .collect(), + | _ => Vec::new(), + }; + + let join_authorized_via_users_server: Option = { + if restriction_rooms + .iter() + .stream() + .any(|restriction_room_id| { + services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + }) + .await + { + services + .rooms + .state_cache + .local_users_in_room(room_id) + .filter(|user| { + services.rooms.state_accessor.user_can_invite( + room_id, + user, + sender_user, + &state_lock, + ) + }) + .boxed() + .next() + .await + .map(ToOwned::to_owned) + } else { + None + } + }; + + let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + join_authorized_via_users_server, 
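+		// Only populated when the sender satisfied one of the restricted-join allow rules
+		// checked above.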
+ ..RoomMemberEventContent::new(MembershipState::Join) + }; + + // Try normal join first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if restriction_rooms.is_empty() + && (servers.is_empty() + || servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!( + "We couldn't do the join locally, maybe federation can help to satisfy the restricted \ + join requirements" + ); + let Ok((make_join_response, remote_server)) = + make_join_request(services, sender_user, room_id, servers).await + else { + return Err(error); + }; + + let Some(room_version_id) = make_join_response.room_version else { + return Err!(BadServerResponse("Remote room version is not supported by conduwuit")); + }; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut join_event_stub: CanonicalJsonObject = + serde_json::from_str(make_join_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_join event json received from server: {e:?}")) + })?; + + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + + join_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + join_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + join_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + join_authorized_via_users_server, + ..RoomMemberEventContent::new(MembershipState::Join) + }) + .expect("event is valid, we just created it"), + ); + + // We keep the "event_id" in the pdu only in v1 or + // v2 rooms + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + join_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut join_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&join_event_stub, &room_version_id)?; + + // Add event_id back + join_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let join_event = join_event_stub; + + let send_join_response = services + .sending + .send_synapse_request( + &remote_server, + federation::membership::create_join_event::v2::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + omit_members: false, + pdu: services + .sending + .convert_to_outgoing_federation_event(join_event.clone()) + .await, + }, + ) + .await?; + + if let Some(signed_raw) = send_join_response.room_state.event { + let (signed_event_id, signed_value) = + gen_event_id_canonical_json(&signed_raw, 
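+			// Re-derive the event ID from the signed event so we can verify it matches the
+			// one we sent.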
&room_version_id).map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; + + if signed_event_id != event_id { + return Err!(Request(BadJson( + warn!(%signed_event_id, %event_id, "Server {remote_server} sent event with wrong event ID") + ))); + } + + drop(state_lock); + services + .rooms + .event_handler + .handle_incoming_pdu(&remote_server, room_id, &signed_event_id, signed_value, true) + .boxed() + .await?; + } else { + return Err(error); + } + + Ok(()) +} + +async fn make_join_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::membership::prepare_join_event::v1::Response, OwnedServerName)> { + let mut make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); + + let mut make_join_counter: usize = 0; + let mut incompatible_room_version_count: usize = 0; + + for remote_server in servers { + if services.globals.server_is_ours(remote_server) { + continue; + } + info!("Asking {remote_server} for make_join ({make_join_counter})"); + let make_join_response = services + .sending + .send_federation_request( + remote_server, + federation::membership::prepare_join_event::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services.server.supported_room_versions().collect(), + }, + ) + .await; + + trace!("make_join response: {:?}", make_join_response); + make_join_counter = make_join_counter.saturating_add(1); + + if let Err(ref e) = make_join_response { + if matches!( + e.kind(), + ErrorKind::IncompatibleRoomVersion { .. } | ErrorKind::UnsupportedRoomVersion + ) { + incompatible_room_version_count = + incompatible_room_version_count.saturating_add(1); + } + + if incompatible_room_version_count > 15 { + info!( + "15 servers have responded with M_INCOMPATIBLE_ROOM_VERSION or \ + M_UNSUPPORTED_ROOM_VERSION, assuming that conduwuit does not support the \ + room version {room_id}: {e}" + ); + make_join_response_and_server = + Err!(BadServerResponse("Room version is not supported by Conduwuit")); + return make_join_response_and_server; + } + + if make_join_counter > 40 { + warn!( + "40 servers failed to provide valid make_join response, assuming no server \ + can assist in joining." 
+ ); + make_join_response_and_server = + Err!(BadServerResponse("No server available to assist in joining.")); + + return make_join_response_and_server; + } + } + + make_join_response_and_server = make_join_response.map(|r| (r, remote_server.clone())); + + if make_join_response_and_server.is_ok() { + break; + } + } + + make_join_response_and_server +} + +pub(crate) async fn invite_helper( + services: &Services, + sender_user: &UserId, + user_id: &UserId, + room_id: &RoomId, + reason: Option, + is_direct: bool, +) -> Result { + if !services.users.is_admin(sender_user).await && services.config.block_non_admin_invites { + info!( + "User {sender_user} is not an admin and attempted to send an invite to room \ + {room_id}" + ); + return Err!(Request(Forbidden("Invites are not allowed on this server."))); + } + + if !services.globals.user_is_local(user_id) { + let (pdu, pdu_json, invite_room_state) = { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let content = RoomMemberEventContent { + avatar_url: services.users.avatar_url(user_id).await.ok(), + is_direct: Some(is_direct), + reason, + ..RoomMemberEventContent::new(MembershipState::Invite) + }; + + let (pdu, pdu_json) = services + .rooms + .timeline + .create_hash_and_sign_event( + PduBuilder::state(user_id.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await?; + + let invite_room_state = services.rooms.state.summary_stripped(&pdu).await; + + drop(state_lock); + + (pdu, pdu_json, invite_room_state) + }; + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + let response = services + .sending + .send_federation_request(user_id.server_name(), create_invite::v2::Request { + room_id: room_id.to_owned(), + event_id: (*pdu.event_id).to_owned(), + room_version: room_version_id.clone(), + event: services + .sending + .convert_to_outgoing_federation_event(pdu_json.clone()) + .await, + invite_room_state, + via: services + .rooms + .state_cache + .servers_route_via(room_id) + .await + .ok(), + }) + .await?; + + // We do not add the event_id field to the pdu here because of signature and + // hashes checks + let (event_id, value) = gen_event_id_canonical_json(&response.event, &room_version_id) + .map_err(|e| { + err!(Request(BadJson(warn!("Could not convert event to canonical JSON: {e}")))) + })?; + + if pdu.event_id != event_id { + return Err!(Request(BadJson(warn!( + %pdu.event_id, %event_id, + "Server {} sent event with wrong event ID", + user_id.server_name() + )))); + } + + let origin: OwnedServerName = serde_json::from_value(serde_json::to_value( + value + .get("origin") + .ok_or_else(|| err!(Request(BadJson("Event missing origin field."))))?, + )?) + .map_err(|e| { + err!(Request(BadJson(warn!("Origin field in event is not a valid server name: {e}")))) + })?; + + let pdu_id = services + .rooms + .event_handler + .handle_incoming_pdu(&origin, room_id, &event_id, value, true) + .boxed() + .await? + .ok_or_else(|| { + err!(Request(InvalidParam("Could not accept incoming PDU as timeline event."))) + })?; + + return services.sending.send_pdu_room(room_id, &pdu_id).await; + } + + if !services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + return Err!(Request(Forbidden( + "You must be joined in the room you are trying to invite from." 
+ ))); + } + + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let content = RoomMemberEventContent { + displayname: services.users.displayname(user_id).await.ok(), + avatar_url: services.users.avatar_url(user_id).await.ok(), + blurhash: services.users.blurhash(user_id).await.ok(), + is_direct: Some(is_direct), + reason, + ..RoomMemberEventContent::new(MembershipState::Invite) + }; + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await?; + + drop(state_lock); + + Ok(()) +} + +// Make a user leave all their joined rooms, rescinds knocks, forgets all rooms, +// and ignores errors +pub async fn leave_all_rooms(services: &Services, user_id: &UserId) { + let rooms_joined = services + .rooms + .state_cache + .rooms_joined(user_id) + .map(ToOwned::to_owned); + + let rooms_invited = services + .rooms + .state_cache + .rooms_invited(user_id) + .map(|(r, _)| r); + + let rooms_knocked = services + .rooms + .state_cache + .rooms_knocked(user_id) + .map(|(r, _)| r); + + let all_rooms: Vec<_> = rooms_joined + .chain(rooms_invited) + .chain(rooms_knocked) + .collect() + .await; + + for room_id in all_rooms { + // ignore errors + if let Err(e) = leave_room(services, user_id, &room_id, None).await { + warn!(%user_id, "Failed to leave {room_id} remotely: {e}"); + } + + services.rooms.state_cache.forget(&room_id, user_id); + } +} + +pub async fn leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result { + let default_member_content = RoomMemberEventContent { + membership: MembershipState::Leave, + reason: reason.clone(), + join_authorized_via_users_server: None, + is_direct: None, + avatar_url: None, + displayname: None, + third_party_invite: None, + blurhash: None, + redact_events: None, + }; + + let is_banned = services.rooms.metadata.is_banned(room_id); + let is_disabled = services.rooms.metadata.is_disabled(room_id); + + pin_mut!(is_banned, is_disabled); + if is_banned.or(is_disabled).await { + // the room is banned/disabled, the room must be rejected locally since we + // cant/dont want to federate with this server + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await?; + + return Ok(()); + } + + let dont_have_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .eq(&false); + + let not_knocked = services + .rooms + .state_cache + .is_knocked(user_id, room_id) + .eq(&false); + + // Ask a remote server if we don't have this room and are not knocking on it + if dont_have_room.and(not_knocked).await { + if let Err(e) = remote_leave_room(services, user_id, room_id, reason.clone()) + .boxed() + .await + { + warn!(%user_id, "Failed to leave room {room_id} remotely: {e}"); + // Don't tell the client about this error + } + + let last_state = services + .rooms + .state_cache + .invite_state(user_id, room_id) + .or_else(|_| services.rooms.state_cache.knock_state(user_id, room_id)) + .or_else(|_| services.rooms.state_cache.left_state(user_id, room_id)) + .await + .ok(); + + // We always drop the invite, we can't rely on other servers + services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + last_state, + None, + true, + ) + .await?; + } else { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + let Ok(event) = 
services + .rooms + .state_accessor + .room_state_get_content::( + room_id, + &StateEventType::RoomMember, + user_id.as_str(), + ) + .await + else { + debug_warn!( + "Trying to leave a room you are not a member of, marking room as left locally." + ); + + return services + .rooms + .state_cache + .update_membership( + room_id, + user_id, + default_member_content, + user_id, + None, + None, + true, + ) + .await; + }; + + services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(user_id.to_string(), &RoomMemberEventContent { + membership: MembershipState::Leave, + reason, + join_authorized_via_users_server: None, + is_direct: None, + ..event + }), + user_id, + room_id, + &state_lock, + ) + .await?; + } + + Ok(()) +} + +async fn remote_leave_room( + services: &Services, + user_id: &UserId, + room_id: &RoomId, + reason: Option, +) -> Result<()> { + let mut make_leave_response_and_server = + Err!(BadServerResponse("No remote server available to assist in leaving {room_id}.")); + + let mut servers: HashSet = services + .rooms + .state_cache + .servers_invite_via(room_id) + .map(ToOwned::to_owned) + .collect() + .await; + + match services + .rooms + .state_cache + .invite_state(user_id, room_id) + .await + { + | Ok(invite_state) => { + servers.extend( + invite_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + }, + | _ => { + match services + .rooms + .state_cache + .knock_state(user_id, room_id) + .await + { + | Ok(knock_state) => { + servers.extend( + knock_state + .iter() + .filter_map(|event| event.get_field("sender").ok().flatten()) + .filter_map(|sender: &str| UserId::parse(sender).ok()) + .filter_map(|sender| { + if !services.globals.user_is_local(sender) { + Some(sender.server_name().to_owned()) + } else { + None + } + }), + ); + }, + | _ => {}, + } + }, + } + + if let Some(room_id_server_name) = room_id.server_name() { + servers.insert(room_id_server_name.to_owned()); + } + + debug_info!("servers in remote_leave_room: {servers:?}"); + + for remote_server in servers { + let make_leave_response = services + .sending + .send_federation_request( + &remote_server, + federation::membership::prepare_leave_event::v1::Request { + room_id: room_id.to_owned(), + user_id: user_id.to_owned(), + }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let Some(room_version_id) = make_leave_response.room_version else { + return Err!(BadServerResponse(warn!( + "No room version was returned by {remote_server} for {room_id}, room version is \ + likely not supported by conduwuit" + ))); + }; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse(warn!( + "Remote room version {room_version_id} for {room_id} is not supported by conduwuit", + ))); + } + + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse(warn!( + "Invalid make_leave event json received from {remote_server} for {room_id}: {e:?}" + ))) + })?; + + // TODO: Is origin needed? 
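+	// Stamp our server name as `origin` and refresh `origin_server_ts` before signing.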
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // Inject the reason key into the event content dict if it exists + if let Some(reason) = reason { + if let Some(CanonicalJsonValue::Object(content)) = leave_event_stub.get_mut("content") { + content.insert("reason".to_owned(), CanonicalJsonValue::String(reason)); + } + } + + // room v3 and above removed the "event_id" field from remote PDU format + match room_version_id { + | RoomVersionId::V1 | RoomVersionId::V2 => {}, + | _ => { + leave_event_stub.remove("event_id"); + }, + } + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut leave_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&leave_event_stub, &room_version_id)?; + + // Add event_id back + leave_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + services + .sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id: room_id.to_owned(), + event_id, + pdu: services + .sending + .convert_to_outgoing_federation_event(leave_event.clone()) + .await, + }, + ) + .await?; + + Ok(()) +} + +async fn knock_room_by_id_helper( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], +) -> Result { + let state_lock = services.rooms.state.mutex.lock(room_id).await; + + if services + .rooms + .state_cache + .is_invited(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already invited in {room_id} but attempted to knock"); + return Err!(Request(Forbidden( + "You cannot knock on a room you are already invited/accepted to." 
+ ))); + } + + if services + .rooms + .state_cache + .is_joined(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already joined in {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are already joined in."))); + } + + if services + .rooms + .state_cache + .is_knocked(sender_user, room_id) + .await + { + debug_warn!("{sender_user} is already knocked in {room_id}"); + return Ok(knock_room::v3::Response { room_id: room_id.into() }); + } + + if let Ok(membership) = services + .rooms + .state_accessor + .get_member(room_id, sender_user) + .await + { + if membership.membership == MembershipState::Ban { + debug_warn!("{sender_user} is banned from {room_id} but attempted to knock"); + return Err!(Request(Forbidden("You cannot knock on a room you are banned from."))); + } + } + + // For knock_restricted rooms, check if the user meets the restricted conditions + // If they do, attempt to join instead of knock + // This is not mentioned in the spec, but should be allowable (we're allowed to + // auto-join invites to knocked rooms) + let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; + if let JoinRule::KnockRestricted(restricted) = &join_rule { + let restriction_rooms: Vec<_> = restricted + .allow + .iter() + .filter_map(|a| match a { + | AllowRule::RoomMembership(r) => Some(&r.room_id), + | _ => None, + }) + .collect(); + + // Check if the user is in any of the allowed rooms + let mut user_meets_restrictions = false; + for restriction_room_id in &restriction_rooms { + if services + .rooms + .state_cache + .is_joined(sender_user, restriction_room_id) + .await + { + user_meets_restrictions = true; + break; + } + } + + // If the user meets the restrictions, try joining instead + if user_meets_restrictions { + debug_info!( + "{sender_user} meets the restricted criteria in knock_restricted room \ + {room_id}, attempting to join instead of knock" + ); + // For this case, we need to drop the state lock and get a new one in + // join_room_by_id_helper We need to release the lock here and let + // join_room_by_id_helper acquire it again + drop(state_lock); + match join_room_by_id_helper( + services, + sender_user, + room_id, + reason.clone(), + servers, + None, + &None, + ) + .await + { + | Ok(_) => return Ok(knock_room::v3::Response::new(room_id.to_owned())), + | Err(e) => { + debug_warn!( + "Failed to convert knock to join for {sender_user} in {room_id}: {e:?}" + ); + // Get a new state lock for the remaining knock logic + let new_state_lock = services.rooms.state.mutex.lock(room_id).await; + + let server_in_room = services + .rooms + .state_cache + .server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } else { + knock_room_helper_remote( + services, + sender_user, + room_id, + reason, + servers, + new_state_lock, + ) + .boxed() + .await?; + } + + return Ok(knock_room::v3::Response::new(room_id.to_owned())); + }, + } + } + } else if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { + debug_warn!( + "{sender_user} attempted to knock on room {room_id} but its join rule is \ + {join_rule:?}, not knock or knock_restricted" + ); + } + + let server_in_room = services + .rooms + .state_cache + 
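+		// Normal knock path: work out whether we can knock locally or must go through a
+		// remote server.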
.server_in_room(services.globals.server_name(), room_id) + .await; + + let local_knock = server_in_room + || servers.is_empty() + || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])); + + if local_knock { + knock_room_helper_local(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } else { + knock_room_helper_remote(services, sender_user, room_id, reason, servers, state_lock) + .boxed() + .await?; + } + + Ok(knock_room::v3::Response::new(room_id.to_owned())) +} + +async fn knock_room_helper_local( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + debug_info!("We can knock locally"); + + let room_version_id = services.rooms.state.get_room_version(room_id).await?; + + if matches!( + room_version_id, + RoomVersionId::V1 + | RoomVersionId::V2 + | RoomVersionId::V3 + | RoomVersionId::V4 + | RoomVersionId::V5 + | RoomVersionId::V6 + ) { + return Err!(Request(Forbidden("This room does not support knocking."))); + } + + // Verify that this room has a valid knock or knock_restricted join rule + let join_rule = services.rooms.state_accessor.get_join_rules(room_id).await; + if !matches!(join_rule, JoinRule::Knock | JoinRule::KnockRestricted(_)) { + return Err!(Request(Forbidden("This room's join rule does not allow knocking."))); + } + + let content = RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason: reason.clone(), + ..RoomMemberEventContent::new(MembershipState::Knock) + }; + + // Try normal knock first + let Err(error) = services + .rooms + .timeline + .build_and_append_pdu( + PduBuilder::state(sender_user.to_string(), &content), + sender_user, + room_id, + &state_lock, + ) + .await + else { + return Ok(()); + }; + + if servers.is_empty() || (servers.len() == 1 && services.globals.server_is_ours(&servers[0])) + { + return Err(error); + } + + warn!("We couldn't do the knock locally, maybe federation can help to satisfy the knock"); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub = serde_json::from_str::( + make_knock_response.event.get(), + ) + .map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // 
In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + .hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + &parsed_knock_pdu, + knock_event, + once(parsed_knock_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + Ok(()) +} + +async fn knock_room_helper_remote( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + reason: Option, + servers: &[OwnedServerName], + state_lock: RoomMutexGuard, +) -> Result { + info!("Knocking {room_id} over federation."); + + let (make_knock_response, remote_server) = + make_knock_request(services, sender_user, room_id, servers).await?; + + info!("make_knock finished"); + + let room_version_id = make_knock_response.room_version; + + if !services.server.supported_room_version(&room_version_id) { + return Err!(BadServerResponse( + "Remote room version {room_version_id} is not supported by conduwuit" + )); + } + + let mut knock_event_stub: CanonicalJsonObject = + serde_json::from_str(make_knock_response.event.get()).map_err(|e| { + err!(BadServerResponse("Invalid make_knock event json received from server: {e:?}")) + })?; + + knock_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services.globals.server_name().as_str().to_owned()), + ); + knock_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + knock_event_stub.insert( + "content".to_owned(), + to_canonical_value(RoomMemberEventContent { + displayname: services.users.displayname(sender_user).await.ok(), + avatar_url: services.users.avatar_url(sender_user).await.ok(), + blurhash: services.users.blurhash(sender_user).await.ok(), + reason, + ..RoomMemberEventContent::new(MembershipState::Knock) + }) + .expect("event is valid, we just created it"), + ); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs + // to be present + services + .server_keys + 
.hash_and_sign_event(&mut knock_event_stub, &room_version_id)?; + + // Generate event id + let event_id = gen_event_id(&knock_event_stub, &room_version_id)?; + + // Add event_id + knock_event_stub + .insert("event_id".to_owned(), CanonicalJsonValue::String(event_id.clone().into())); + + // It has enough fields to be called a proper event now + let knock_event = knock_event_stub; + + info!("Asking {remote_server} for send_knock in room {room_id}"); + let send_knock_request = federation::knock::send_knock::v1::Request { + room_id: room_id.to_owned(), + event_id: event_id.clone(), + pdu: services + .sending + .convert_to_outgoing_federation_event(knock_event.clone()) + .await, + }; + + let send_knock_response = services + .sending + .send_federation_request(&remote_server, send_knock_request) + .await?; + + info!("send_knock finished"); + + services + .rooms + .short + .get_or_create_shortroomid(room_id) + .await; + + info!("Parsing knock event"); + let parsed_knock_pdu = PduEvent::from_id_val(&event_id, knock_event.clone()) + .map_err(|e| err!(BadServerResponse("Invalid knock event PDU: {e:?}")))?; + + info!("Going through send_knock response knock state events"); + let state = send_knock_response + .knock_room_state + .iter() + .map(|event| serde_json::from_str::(event.clone().into_json().get())) + .filter_map(Result::ok); + + let mut state_map: HashMap = HashMap::new(); + + for event in state { + let Some(state_key) = event.get("state_key") else { + debug_warn!("send_knock stripped state event missing state_key: {event:?}"); + continue; + }; + let Some(event_type) = event.get("type") else { + debug_warn!("send_knock stripped state event missing event type: {event:?}"); + continue; + }; + + let Ok(state_key) = serde_json::from_value::(state_key.clone().into()) else { + debug_warn!("send_knock stripped state event has invalid state_key: {event:?}"); + continue; + }; + let Ok(event_type) = serde_json::from_value::(event_type.clone().into()) + else { + debug_warn!("send_knock stripped state event has invalid event type: {event:?}"); + continue; + }; + + let event_id = gen_event_id(&event, &room_version_id)?; + let shortstatekey = services + .rooms + .short + .get_or_create_shortstatekey(&event_type, &state_key) + .await; + + services.rooms.outlier.add_pdu_outlier(&event_id, &event); + state_map.insert(shortstatekey, event_id.clone()); + } + + info!("Compressing state from send_knock"); + let compressed: CompressedState = services + .rooms + .state_compressor + .compress_state_events(state_map.iter().map(|(ssk, eid)| (ssk, eid.borrow()))) + .collect() + .await; + + debug!("Saving compressed state"); + let HashSetCompressStateEvent { + shortstatehash: statehash_before_knock, + added, + removed, + } = services + .rooms + .state_compressor + .save_state(room_id, Arc::new(compressed)) + .await?; + + debug!("Forcing state for new room"); + services + .rooms + .state + .force_state(room_id, statehash_before_knock, added, removed, &state_lock) + .await?; + + let statehash_after_knock = services + .rooms + .state + .append_to_state(&parsed_knock_pdu) + .await?; + + info!("Updating membership locally to knock state with provided stripped state events"); + services + .rooms + .state_cache + .update_membership( + room_id, + sender_user, + parsed_knock_pdu + .get_content::() + .expect("we just created this"), + sender_user, + Some(send_knock_response.knock_room_state), + None, + false, + ) + .await?; + + info!("Appending room knock event locally"); + services + .rooms + .timeline + .append_pdu( + 
&parsed_knock_pdu, + knock_event, + once(parsed_knock_pdu.event_id.borrow()), + &state_lock, + ) + .await?; + + info!("Setting final room state for new room"); + // We set the room state after inserting the pdu, so that we never have a moment + // in time where events in the current room state do not exist + services + .rooms + .state + .set_room_state(room_id, statehash_after_knock, &state_lock); + + Ok(()) +} + +async fn make_knock_request( + services: &Services, + sender_user: &UserId, + room_id: &RoomId, + servers: &[OwnedServerName], +) -> Result<(federation::knock::create_knock_event_template::v1::Response, OwnedServerName)> { + let mut make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + let mut make_knock_counter: usize = 0; + + for remote_server in servers { + if services.globals.server_is_ours(remote_server) { + continue; + } + + info!("Asking {remote_server} for make_knock ({make_knock_counter})"); + + let make_knock_response = services + .sending + .send_federation_request( + remote_server, + federation::knock::create_knock_event_template::v1::Request { + room_id: room_id.to_owned(), + user_id: sender_user.to_owned(), + ver: services.server.supported_room_versions().collect(), + }, + ) + .await; + + trace!("make_knock response: {make_knock_response:?}"); + make_knock_counter = make_knock_counter.saturating_add(1); + + make_knock_response_and_server = make_knock_response.map(|r| (r, remote_server.clone())); + + if make_knock_response_and_server.is_ok() { + break; + } + + if make_knock_counter > 40 { + warn!( + "50 servers failed to provide valid make_knock response, assuming no server can \ + assist in knocking." + ); + make_knock_response_and_server = + Err!(BadServerResponse("No server available to assist in knocking.")); + + return make_knock_response_and_server; + } + } + + make_knock_response_and_server +} diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index ba9c013d..636d3189 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -616,7 +616,7 @@ where .map(ToOwned::to_owned), ) }; - + debug!("running auth check on {:?}", event.event_id()); let auth_result = auth_check(room_version, &event, current_third_party, fetch_state).await; diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index fad9ac74..0930c96f 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -80,7 +80,7 @@ where // 5. Reject "due to auth events" if can't get all the auth events or some of // the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often - debug!("Fetching auth events"); + debug!("Fetching auth events for {}", incoming_pdu.event_id); Box::pin(self.fetch_and_handle_outliers( origin, pdu_event.auth_events(), @@ -92,12 +92,12 @@ where // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the // auth events - debug!("Checking based on auth events"); + debug!("Checking {} based on auth events", incoming_pdu.event_id); // Build map of auth events let mut auth_events = HashMap::with_capacity(pdu_event.auth_events().count()); for id in pdu_event.auth_events() { let Ok(auth_event) = self.services.timeline.get_pdu(id).await else { - warn!("Could not find auth event {id}"); + warn!("Could not find auth event {id} for {}", incoming_pdu.event_id); continue; }; @@ -131,6 +131,7 @@ where ready(auth_events.get(&key).map(ToOwned::to_owned)) }; + debug!("running auth check to handle outlier pdu {:?}", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &to_room_version(&room_version_id), &pdu_event, diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs index 4093cb05..0dca2d70 100644 --- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs +++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs @@ -1,12 +1,6 @@ use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant}; -use conduwuit::{ - Err, Result, debug, debug_info, err, implement, is_equal_to, - matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res}, - trace, - utils::stream::{BroadbandExt, ReadyExt}, - warn, -}; +use conduwuit::{Err, Result, debug, debug_info, err, implement, matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, info}; use futures::{FutureExt, StreamExt, future::ready}; use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType}; @@ -17,22 +11,19 @@ use crate::rooms::{ }; #[implement(super::Service)] -pub(super) async fn upgrade_outlier_to_timeline_pdu( +pub(super) async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: PduEvent, val: BTreeMap, - create_event: &Pdu, + create_event: &PduEvent, origin: &ServerName, room_id: &RoomId, -) -> Result> -where - Pdu: Event + Send + Sync, -{ +) -> Result> { // Skip the PDU if we already have it as a timeline event if let Ok(pduid) = self .services .timeline - .get_pdu_id(incoming_pdu.event_id()) + .get_pdu_id(&incoming_pdu.event_id) .await { return Ok(Some(pduid)); @@ -41,13 +32,13 @@ where if self .services .pdu_metadata - .is_event_soft_failed(incoming_pdu.event_id()) + .is_event_soft_failed(&incoming_pdu.event_id) .await { return Err!(Request(InvalidParam("Event has been soft failed"))); } - debug!("Upgrading to timeline pdu"); + debug!("Upgrading pdu {} from outlier to timeline pdu", incoming_pdu.event_id); let timer = Instant::now(); let room_version_id = get_room_version_id(create_event)?; @@ -55,8 +46,8 @@ where // backwards extremities doing all the checks in this list starting at 1. // These are not timeline events. - debug!("Resolving state at event"); - let mut state_at_incoming_event = if incoming_pdu.prev_events().count() == 1 { + debug!("Resolving state at event {}", incoming_pdu.event_id); + let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 { self.state_at_incoming_degree_one(&incoming_pdu).await? 
} else { self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id) @@ -65,16 +56,15 @@ where if state_at_incoming_event.is_none() { state_at_incoming_event = self - .fetch_state(origin, create_event, room_id, incoming_pdu.event_id()) + .fetch_state(origin, create_event, room_id, &incoming_pdu.event_id) .await?; } let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); - let room_version = to_room_version(&room_version_id); - debug!("Performing auth check"); + debug!("Performing auth check to upgrade {}", incoming_pdu.event_id); // 11. Check the auth of the event passes based on the state of the event let state_fetch_state = &state_at_incoming_event; let state_fetch = |k: StateEventType, s: StateKey| async move { @@ -84,6 +74,7 @@ where self.services.timeline.get_pdu(event_id).await.ok() }; + debug!("running auth check on {}", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -97,24 +88,25 @@ where return Err!(Request(Forbidden("Event has failed auth check with state at the event."))); } - debug!("Gathering auth events"); + debug!("Gathering auth events for {}", incoming_pdu.event_id); let auth_events = self .services .state .get_auth_events( room_id, - incoming_pdu.kind(), - incoming_pdu.sender(), - incoming_pdu.state_key(), - incoming_pdu.content(), + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, ) .await?; let state_fetch = |k: &StateEventType, s: &str| { let key = k.with_state_key(s); - ready(auth_events.get(&key).map(ToOwned::to_owned)) + ready(auth_events.get(&key).cloned()) }; + debug!("running auth check on {} with claimed state auth", incoming_pdu.event_id); let auth_check = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -125,7 +117,7 @@ where .map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?; // Soft fail check before doing state res - debug!("Performing soft-fail check"); + debug!("Performing soft-fail check on {}", incoming_pdu.event_id); let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) { | (false, _) => true, | (true, None) => false, @@ -133,7 +125,7 @@ where !self .services .state_accessor - .user_can_redact(&redact_id, incoming_pdu.sender(), incoming_pdu.room_id(), true) + .user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true) .await?, }; @@ -153,7 +145,7 @@ where .map(ToOwned::to_owned) .ready_filter(|event_id| { // Remove any that are referenced by this incoming event's prev_events - !incoming_pdu.prev_events().any(is_equal_to!(event_id)) + !incoming_pdu.prev_events.contains(event_id) }) .broad_filter_map(|event_id| async move { // Only keep those extremities were not referenced yet @@ -170,7 +162,7 @@ where debug!( "Retained {} extremities checked against {} prev_events", extremities.len(), - incoming_pdu.prev_events().count() + incoming_pdu.prev_events.len() ); let state_ids_compressed: Arc = self @@ -185,20 +177,20 @@ where .map(Arc::new) .await; - if incoming_pdu.state_key().is_some() { + if incoming_pdu.state_key.is_some() { debug!("Event is a state-event. 
Deriving new room state"); // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = incoming_pdu.state_key() { + if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = self .services .short - .get_or_create_shortstatekey(&incoming_pdu.kind().to_string().into(), state_key) + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) .await; - let event_id = incoming_pdu.event_id(); - state_after.insert(shortstatekey, event_id.to_owned()); + let event_id = &incoming_pdu.event_id; + state_after.insert(shortstatekey, event_id.clone()); } let new_room_state = self @@ -222,7 +214,7 @@ where // 14. Check if the event passes auth based on the "current state" of the room, // if not soft fail it if soft_fail { - debug!("Soft failing event"); + info!("Soft failing event {}", incoming_pdu.event_id); let extremities = extremities.iter().map(Borrow::borrow); self.services @@ -240,9 +232,9 @@ where // Soft fail, we keep the event as an outlier but don't add it to the timeline self.services .pdu_metadata - .mark_event_soft_failed(incoming_pdu.event_id()); + .mark_event_soft_failed(&incoming_pdu.event_id); - warn!("Event was soft failed: {:?}", incoming_pdu.event_id()); + warn!("Event was soft failed: {incoming_pdu:?}"); return Err!(Request(InvalidParam("Event has been soft failed"))); } @@ -253,7 +245,7 @@ where let extremities = extremities .iter() .map(Borrow::borrow) - .chain(once(incoming_pdu.event_id())); + .chain(once(incoming_pdu.event_id.borrow())); let pdu_id = self .services diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 02efbfe0..86826dda 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -785,6 +785,7 @@ impl Service { ready(auth_events.get(&key)) }; + debug!("running auth check on new {} event by {} in {}", pdu.kind, pdu.sender, pdu.room_id); let auth_check = state_res::auth_check( &room_version, &pdu, From 879a59a1e44b5ae6890a0793c6d42185f8f6bffb Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Thu, 12 Jun 2025 01:05:45 +0100 Subject: [PATCH 05/11] modify more log strings so they're more useful than not --- src/core/matrix/state_res/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index 636d3189..771e364f 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -735,7 +735,7 @@ where { let mut room_id = None; while let Some(sort_ev) = event { - debug!(event_id = sort_ev.event_id().as_str(), "mainline"); + trace!(event_id = sort_ev.event_id().as_str(), "mainline"); if room_id.is_none() { room_id = Some(sort_ev.room_id().to_owned()); } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 86826dda..8c80f86b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1171,7 +1171,7 @@ impl Service { .boxed(); while let Some(ref backfill_server) = servers.next().await { - info!("Asking {backfill_server} for backfill"); + info!("Asking {backfill_server} for backfill in {:?}", room_id.to_owned()); let response = self .services .sending @@ -1199,7 +1199,7 @@ impl Service { } } - info!("No servers could backfill, but backfill was needed in room {room_id}"); + warn!("No servers could backfill, but backfill was needed in room {room_id}"); Ok(()) } From 
7923e05982a4b02b95fffb0937b4530026c22b70 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 08:02:49 -0700 Subject: [PATCH 06/11] change rocksdb stats level to 3 scale rocksdb background jobs and subcompactions change rocksdb default error level to info from error delete unused num_threads function fix warns from cargo --- conduwuit-example.toml | 2 +- src/core/config/mod.rs | 6 +++--- src/database/engine/db_opts.rs | 22 ++++------------------ 3 files changed, 8 insertions(+), 22 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index d85281aa..f744a07d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1001,7 +1001,7 @@ # 3 to 5 = Statistics with possible performance impact. # 6 = All statistics. # -#rocksdb_stats_level = 1 +#rocksdb_stats_level = 3 # This is a password that can be configured that will let you login to the # server bot account (currently `@conduit`) for emergency troubleshooting diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 390a253e..881decfa 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1171,7 +1171,7 @@ pub struct Config { /// 3 to 5 = Statistics with possible performance impact. /// 6 = All statistics. /// - /// default: 1 + /// default: 3 #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, @@ -2215,7 +2215,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 } fn default_rocksdb_recovery_mode() -> u8 { 1 } -fn default_rocksdb_log_level() -> String { "error".to_owned() } +fn default_rocksdb_log_level() -> String { "info".to_owned() } fn default_rocksdb_log_time_to_roll() -> usize { 0 } @@ -2247,7 +2247,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 } #[allow(clippy::doc_markdown)] fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 } -fn default_rocksdb_stats_level() -> u8 { 1 } +fn default_rocksdb_stats_level() -> u8 { 3 } // I know, it's a great name #[must_use] diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 18cec742..1299443d 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,8 +1,6 @@ -use std::{cmp, convert::TryFrom}; - -use conduwuit::{Config, Result, utils}; +use conduwuit::{Config, Result}; use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; - +use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; /// Create database-wide options suitable for opening the database. 
This also
@@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Result<Options> {
 	set_logging_defaults(&mut opts, config);
 
 	// Processing
-	opts.set_max_background_jobs(num_threads::<i32>(config)?);
-	opts.set_max_subcompactions(num_threads::<u32>(config)?);
+	opts.set_max_background_jobs(parallelism_scaled_i32(1));
+	opts.set_max_subcompactions(parallelism_scaled_u32(1));
 	opts.set_avoid_unnecessary_blocking_io(true);
 	opts.set_max_file_opening_threads(0);
 
@@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
 		opts.set_callback_logger(rocksdb_log_level, &handle_log);
 	}
 }
-
-fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> {
-	const MIN_PARALLELISM: usize = 2;
-
-	let requested = if config.rocksdb_parallelism_threads != 0 {
-		config.rocksdb_parallelism_threads
-	} else {
-		utils::available_parallelism()
-	};
-
-	utils::math::try_into::<T, usize>(cmp::max(MIN_PARALLELISM, requested))
-}

From 9093ee24851747a48c80cde73406f832b4d594ed Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 18 Jun 2025 12:48:27 -0700
Subject: [PATCH 07/11] make fetching key room events less smart

---
 src/core/matrix/state_res/event_auth.rs | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 830b5460..8833cbfb 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -242,12 +242,16 @@ where
 	}
 	*/
 
-	let (room_create_event, power_levels_event, sender_member_event) = join3(
-		fetch_state(&StateEventType::RoomCreate, ""),
-		fetch_state(&StateEventType::RoomPowerLevels, ""),
-		fetch_state(&StateEventType::RoomMember, sender.as_str()),
-	)
-	.await;
+	// let (room_create_event, power_levels_event, sender_member_event) = join3(
+	// 	fetch_state(&StateEventType::RoomCreate, ""),
+	// 	fetch_state(&StateEventType::RoomPowerLevels, ""),
+	// 	fetch_state(&StateEventType::RoomMember, sender.as_str()),
+	// )
+	// .await;
+
+	let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
+	let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
+	let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;
 
 	let room_create_event = match room_create_event {
 		| None => {

From 1186eace4dc512446795887c70d440f0e5729564 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 18 Jun 2025 16:57:19 -0700
Subject: [PATCH 08/11] lock the getter instead ???
c/o M
---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 +-
 src/service/rooms/state/mod.rs                         | 1 +
 src/service/rooms/timeline/mod.rs                      | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 0dca2d70..cbebee4b 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -141,7 +141,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 	let extremities: Vec<_> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, &state_lock)
 		.map(ToOwned::to_owned)
 		.ready_filter(|event_id| {
 			// Remove any that are referenced by this incoming event's prev_events
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 641aa6a9..92881126 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -388,6 +388,7 @@ impl Service {
 	pub fn get_forward_extremities<'a>(
 		&'a self,
 		room_id: &'a RoomId,
+		_state_lock: &'a RoomMutexGuard,
 	) -> impl Stream<Item = &EventId> + Send + '_ {
 		let prefix = (room_id, Interfix);
 
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 8c80f86b..2a4418d8 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -656,7 +656,7 @@ impl Service {
 		let prev_events: Vec<OwnedEventId> = self
 			.services
 			.state
-			.get_forward_extremities(room_id)
+			.get_forward_extremities(room_id, _mutex_lock)
 			.take(20)
 			.map(Into::into)
 			.collect()

From 2e4d4d09c86eecc2f82ac5567d2e660ca054eaba Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 06:52:40 -0700
Subject: [PATCH 09/11] vehicle loan documentation now available at window 7
 also print event id

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index cbebee4b..5c60ab71 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -159,6 +159,8 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		.collect()
 		.await;
 
+	if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
+
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
 		extremities.len(),

From 2b5bbbb6386a3d91a3200c797b406dbaf29d1552 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:13:30 -0700
Subject: [PATCH 10/11] sender_workers scaling. this time, with feeling!

---
 src/core/config/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 881decfa..80d26972 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1839,9 +1839,9 @@ pub struct Config {
 	pub stream_amplification: usize,
 
 	/// Number of sender task workers; determines sender parallelism. Default is
-	/// '4'. Override by setting a different value. Values clamped 1 to core count.
+	/// core count. Override by setting a different value.
/// - /// default: 4 + /// default: core count #[serde(default = "default_sender_workers")] pub sender_workers: usize, @@ -2328,7 +2328,7 @@ fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } -fn default_sender_workers() -> usize { 4 } +fn default_sender_workers() -> usize { parallelism_scaled(1) } fn default_client_receive_timeout() -> u64 { 75 } From 676bfffd1d8d91fab0354f8bcd893ecb685bf22c Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Mon, 30 Jun 2025 15:25:11 -0700 Subject: [PATCH 11/11] more funny settings (part 3 of 12) --- src/core/config/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 80d26972..8ef580f1 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2075,40 +2075,41 @@ fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64( fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) } -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) } +fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_cache_capacity_modifier() -> f64 { 1.0 } fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_servernameevent_data_cache_capacity() -> u32 { - parallelism_scaled_u32(200_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_stateinfo_cache_capacity() -> u32 { - parallelism_scaled_u32(500).clamp(100, 12000) } + parallelism_scaled_u32(500).clamp(100, 12000) +} fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(500).clamp(100, 12000) }
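A note on the helpers used above: `parallelism_scaled`, `parallelism_scaled_f64`, `parallelism_scaled_i32` and `parallelism_scaled_u32` are imported from `conduwuit::config` in the db_opts.rs hunk and drive every default in patches 06, 10 and 11, but their definitions never appear in this series. The following is only a sketch of what such helpers plausibly look like, assuming they multiply a base value by the detected core count (the same idea as the `utils::available_parallelism()` call in the deleted `num_threads` helper); the real definitions in the tree may differ:

    use std::thread;

    /// Stand-in for conduwuit's `utils::available_parallelism()`: the number of
    /// CPU cores the runtime reports, with a floor of 1.
    fn available_parallelism() -> usize {
        thread::available_parallelism().map_or(1, usize::from)
    }

    /// Scale a base value linearly with the core count, saturating on overflow.
    pub fn parallelism_scaled(base: usize) -> usize {
        base.saturating_mul(available_parallelism())
    }

    pub fn parallelism_scaled_u32(base: u32) -> u32 {
        base.saturating_mul(u32::try_from(available_parallelism()).unwrap_or(1))
    }

    pub fn parallelism_scaled_i32(base: i32) -> i32 {
        base.saturating_mul(i32::try_from(available_parallelism()).unwrap_or(1))
    }

    pub fn parallelism_scaled_f64(base: f64) -> f64 {
        base * available_parallelism() as f64
    }

Read this way, `default_sender_workers() = parallelism_scaled(1)` means one sender worker per core, and a cache default such as `parallelism_scaled_u32(50_000).saturating_add(100_000)` grows linearly with the core count on top of a fixed 100,000-entry floor.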
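Patch 08's change to `get_forward_extremities` is a lock-discipline fix rather than a behaviour change: by making the getter take a reference to the room's `RoomMutexGuard`, only code that already holds the per-room state lock can read the forward extremities, so the read cannot race with another task replacing them. A minimal illustration of the pattern, using hypothetical types rather than conduwuit's actual `RoomMutexGuard` and `MutexMap` machinery:

    use std::collections::HashMap;
    use std::sync::{Mutex, MutexGuard};

    /// Token-style per-room lock: the guard proves the holder has serialized
    /// access to the room, but does not itself own the extremity data.
    type RoomMutexGuard<'a> = MutexGuard<'a, ()>;

    struct StateService {
        /// Stand-in for the database-backed forward-extremity set per room.
        extremities: Mutex<HashMap<String, Vec<String>>>,
    }

    impl StateService {
        /// Requiring `_state_lock` makes it a compile-time error to read the
        /// extremities without first taking the room lock, which is what the
        /// patch enforces for callers of `get_forward_extremities`.
        fn get_forward_extremities(
            &self,
            room_id: &str,
            _state_lock: &RoomMutexGuard<'_>,
        ) -> Vec<String> {
            self.extremities
                .lock()
                .expect("extremity map poisoned")
                .get(room_id)
                .cloned()
                .unwrap_or_default()
        }
    }

A caller first acquires the per-room lock and then passes the guard down, exactly as the updated call sites do with `&state_lock` and `_mutex_lock`.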
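Patch 07 ("make fetching key room events less smart") trades the concurrent `join3` lookup of the create, power-levels and sender-membership events for three sequential awaits. The commit message does not say why; the mechanical difference is only in how the three state lookups are scheduled, roughly as in this sketch (the `fetch_state` stand-in below is hypothetical):

    use futures::future::join3;

    /// Hypothetical stand-in for the auth-check state lookup.
    async fn fetch_state(event_type: &str, state_key: &str) -> Option<String> {
        let _ = state_key;
        Some(event_type.to_owned())
    }

    /// What the removed code did: all three lookups are polled concurrently.
    async fn fetch_concurrently(sender: &str) -> (Option<String>, Option<String>, Option<String>) {
        join3(
            fetch_state("m.room.create", ""),
            fetch_state("m.room.power_levels", ""),
            fetch_state("m.room.member", sender),
        )
        .await
    }

    /// What the patch switches to: one lookup at a time, in a fixed order.
    async fn fetch_sequentially(sender: &str) -> (Option<String>, Option<String>, Option<String>) {
        let create = fetch_state("m.room.create", "").await;
        let power_levels = fetch_state("m.room.power_levels", "").await;
        let member = fetch_state("m.room.member", sender).await;
        (create, power_levels, member)
    }

Sequential awaits give up a little latency but issue at most one lookup at a time, which is presumably the point of making the fetch "less smart".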