From 3b5d5dcefa4a7932b7d6bf4b801bbc1a6c1e341f Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Fri, 25 Apr 2025 21:06:00 -0700
Subject: [PATCH 01/49] probably incorrectly delete support for
 non-standardized matrix srv record

---
 src/service/resolver/actual.rs | 37 ++++++++++++++++------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs
index d23ef95a..52cd5d7d 100644
--- a/src/service/resolver/actual.rs
+++ b/src/service/resolver/actual.rs
@@ -306,28 +306,25 @@ impl super::Service {
 	#[tracing::instrument(name = "srv", level = "debug", skip(self))]
 	async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
-		let hostnames =
-			[format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
+		self.services.server.check_running()?;

-		for hostname in hostnames {
-			self.services.server.check_running()?;
+		debug!("querying SRV for {hostname:?}");

-			debug!("querying SRV for {hostname:?}");
-			let hostname = hostname.trim_end_matches('.');
-			match self.resolver.resolver.srv_lookup(hostname).await {
-				| Err(e) => Self::handle_resolve_error(&e, hostname)?,
-				| Ok(result) => {
-					return Ok(result.iter().next().map(|result| {
-						FedDest::Named(
-							result.target().to_string().trim_end_matches('.').to_owned(),
-							format!(":{}", result.port())
-								.as_str()
-								.try_into()
-								.unwrap_or_else(|_| FedDest::default_port()),
-						)
-					}));
-				},
-			}
+		let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
+		let hostname = hostname_suffix.trim_end_matches('.');
+		match self.resolver.resolver.srv_lookup(hostname).await {
+			| Err(e) => Self::handle_resolve_error(&e, hostname)?,
+			| Ok(result) => {
+				return Ok(result.iter().next().map(|result| {
+					FedDest::Named(
+						result.target().to_string().trim_end_matches('.').to_owned(),
+						format!(":{}", result.port())
+							.as_str()
+							.try_into()
+							.unwrap_or_else(|_| FedDest::default_port()),
+					)
+				}));
+			},
 		}

 		Ok(None)

From ae8127d44b2eea6ff07c623694e139a4c973e431 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 14 May 2025 06:53:00 -0700
Subject: [PATCH 02/49] bump the number of allowed immutable memtables by 1,
 to allow for greater flood protection

this should probably not be applied if you have rocksdb_atomic_flush =
false (the default)
---
 src/database/engine/cf_opts.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs
index cbbd1012..666f9f9e 100644
--- a/src/database/engine/cf_opts.rs
+++ b/src/database/engine/cf_opts.rs
@@ -29,7 +29,7 @@ fn descriptor_cf_options(
 	set_table_options(&mut opts, &desc, cache)?;

 	opts.set_min_write_buffer_number(1);
-	opts.set_max_write_buffer_number(2);
+	opts.set_max_write_buffer_number(3);
 	opts.set_write_buffer_size(desc.write_size);

 	opts.set_target_file_size_base(desc.file_size);

From a62e658e6543f1f752536bd331c7fabdf7cf9ab3 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:02:05 -0700
Subject: [PATCH 03/49] upgrade some settings to enable 5g in continuwuity

enable converged 6g at the edge in continuwuity
better stateinfo_cache_capacity default
better roomid_spacehierarchy_cache_capacity
make sender workers default better and clamp value to core count
update sender workers documentation
add more parallelism_scaled and make them public
update 1 document
---
 conduwuit-example.toml     |  6 +--
 src/core/config/mod.rs     | 75 +++++++++++++++++++-------------------
 src/service/sending/mod.rs | 12 ++----
 3 files changed, 43 insertions(+), 50 deletions(-)
diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index bdc2f570..2c3721d0 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -1655,11 +1655,9 @@
 #stream_amplification = 1024

 # Number of sender task workers; determines sender parallelism. Default is
-# '0' which means the value is determined internally, likely matching the
-# number of tokio worker-threads or number of cores, etc. Override by
-# setting a non-zero value.
+# number of CPU cores. Override by setting a different value.
 #
-#sender_workers = 0
+#sender_workers = 4

 # Enables listener sockets; can be set to false to disable listening. This
 # option is intended for developer/diagnostic purposes only.

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index d93acd9b..17da6492 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1889,12 +1889,10 @@ pub struct Config {
 	pub stream_amplification: usize,

 	/// Number of sender task workers; determines sender parallelism. Default is
-	/// '0' which means the value is determined internally, likely matching the
-	/// number of tokio worker-threads or number of cores, etc. Override by
-	/// setting a non-zero value.
+	/// '4'. Override by setting a different value. Values clamped 1 to core count.
 	///
-	/// default: 0
-	#[serde(default)]
+	/// default: 4
+	#[serde(default = "default_sender_workers")]
 	pub sender_workers: usize,

 	/// Enables listener sockets; can be set to false to disable listening. This
@@ -2125,45 +2123,47 @@ fn default_database_backups_to_keep() -> i16 { 1 }

 fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }

-fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
+fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }

-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) }

 fn default_cache_capacity_modifier() -> f64 { 1.0 }

 fn default_auth_chain_cache_capacity() -> u32 {
-	parallelism_scaled_u32(10_000).saturating_add(100_000)
+	parallelism_scaled_u32(50_000).saturating_add(500_000)
 }

 fn default_shorteventid_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(100_000)
-}
-
-fn default_eventidshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(25_000).saturating_add(100_000)
-}
-
-fn default_eventid_pdu_cache_capacity() -> u32 {
-	parallelism_scaled_u32(25_000).saturating_add(100_000)
-}
-
-fn default_shortstatekey_cache_capacity() -> u32 {
-	parallelism_scaled_u32(10_000).saturating_add(100_000)
-}
-
-fn default_statekeyshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(10_000).saturating_add(100_000)
-}
-
-fn default_servernameevent_data_cache_capacity() -> u32 {
 	parallelism_scaled_u32(100_000).saturating_add(500_000)
 }

-fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
+fn default_eventidshort_cache_capacity() -> u32 {
+	parallelism_scaled_u32(100_000).saturating_add(500_000)
+}

-fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
+fn default_eventid_pdu_cache_capacity() -> u32 {
+	parallelism_scaled_u32(50_000).saturating_add(500_000)
+}

-fn default_dns_cache_entries() -> u32 { 32768 }
+fn default_shortstatekey_cache_capacity() -> u32 {
+	parallelism_scaled_u32(50_000).saturating_add(500_000)
+}
+
+fn default_statekeyshort_cache_capacity() -> u32 {
+	parallelism_scaled_u32(50_000).saturating_add(500_000)
+}
+
+fn default_servernameevent_data_cache_capacity() -> u32 {
+	parallelism_scaled_u32(200_000).saturating_add(500_000)
+}
+
+fn default_stateinfo_cache_capacity() -> u32 {
+	parallelism_scaled_u32(500).clamp(100, 12000) }
+
+fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
+	parallelism_scaled_u32(500).clamp(100, 12000) }
+
+fn default_dns_cache_entries() -> u32 { 327680 }

 fn default_dns_min_ttl() -> u64 { 60 * 180 }

@@ -2352,14 +2352,13 @@ fn default_admin_log_capture() -> String {
 fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }

 #[allow(clippy::as_conversions, clippy::cast_precision_loss)]
-fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
+pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }

-fn parallelism_scaled_u32(val: u32) -> u32 {
-	let val = val.try_into().expect("failed to cast u32 to usize");
-	parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
-}
+pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) }

-fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
+pub fn parallelism_scaled_i32(val: i32) -> i32 { val.saturating_mul(sys::available_parallelism() as i32) }
+
+pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }

 fn default_trusted_server_batch_size() -> usize { 256 }

@@ -2379,6 +2378,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }

 fn default_stream_amplification() -> usize { 1024 }

+fn default_sender_workers() -> usize { 4 }
+
 fn default_client_receive_timeout() -> u64 { 75 }

 fn default_client_request_timeout() -> u64 { 180 }

diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index 08ca7010..ce687551 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -401,16 +401,10 @@ impl Service {
 	fn num_senders(args: &crate::Args<'_>) -> usize {
 		const MIN_SENDERS: usize = 1;

-		// Limit the number of senders to the number of workers threads or number of
-		// cores, conservatively.
-		let max_senders = args
-			.server
-			.metrics
-			.num_workers()
-			.min(available_parallelism());
+		// Limit the maximum number of senders to the number of cores.
+		let max_senders = available_parallelism();

-		// If the user doesn't override the default 0, this is intended to then default
-		// to 1 for now as multiple senders is experimental.
+		// default is 4 senders. clamp between 1 and core count.
 		args.server
 			.config
 			.sender_workers

From bb4b625f631cd8370f50a6ac60b543a549f36afa Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 7 Jun 2025 00:46:55 +0100
Subject: [PATCH 04/49] fix an auth rule not applying correctly

---
 src/core/matrix/state_res/event_auth.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 5c36ce03..0b5b72d7 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -255,6 +255,16 @@ where
 		},
 		| Some(e) => e,
 	};
+	// just re-check 1.2 to work around a bug
+	let Some(room_id_server_name) = incoming_event.room_id().server_name() else {
+		warn!("room ID has no servername");
+		return Ok(false);
+	};
+
+	if room_id_server_name != sender.server_name() {
+		warn!("servername of room ID does not match servername of m.room.create sender");
+		return Ok(false);
+	}

 	if incoming_event.room_id() != room_create_event.room_id() {
 		warn!("room_id of incoming event does not match room_id of m.room.create event");

From bf3dd254e858eee6cbe433d6aacafe5ef6f37a94 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 7 Jun 2025 00:55:03 +0100
Subject: [PATCH 05/49] Note about ruma#2064 in TODO

---
 src/core/matrix/state_res/event_auth.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 0b5b72d7..40c32e03 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -217,8 +217,9 @@ where
 	}

 	/*
-	// TODO: In the past this code caused problems federating with synapse, maybe this has been
-	// resolved already. Needs testing.
+	// TODO: In the past this code was commented as it caused problems with Synapse. This is no
+	// longer the case. This needs to be implemented.
+	// See also: https://github.com/ruma/ruma/pull/2064
 	//
 	// 2. Reject if auth_events
 	//    a. auth_events cannot have duplicate keys since it's a BTree

From e147f0f2745abdfc0a40b48f8ec53b222d266cc4 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Tue, 10 Jun 2025 22:33:31 +0100
Subject: [PATCH 06/49] Kick up a fuss when m.room.create is unfindable

---
 src/core/matrix/state_res/event_auth.rs | 4 ++--
 src/core/matrix/state_res/mod.rs        | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 40c32e03..31c660ed 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -30,7 +30,7 @@ use super::{
 	},
 	room_version::RoomVersion,
 };
-use crate::{debug, error, trace, warn};
+use crate::{debug, err_log, error, trace, warn};

 // FIXME: field extracting could be bundled for `content`
 #[derive(Deserialize)]
@@ -251,7 +251,7 @@ where
 	let room_create_event = match room_create_event {
 		| None => {
-			warn!("no m.room.create event in auth chain");
+			error!("no m.room.create event in auth chain for {}!", incoming_event.event_id());
 			return Ok(false);
 		},
 		| Some(e) => e,

diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs
index ce9d9276..e721e14c 100644
--- a/src/core/matrix/state_res/mod.rs
+++ b/src/core/matrix/state_res/mod.rs
@@ -753,7 +753,7 @@ where
 			}
 		}
 	}
-	// Did not find a power level event so we default to zero
+	warn!("could not find a power event in the mainline map, defaulting to zero depth");

 	Ok(0)
 }

From a61fd287ef7d1937e4ce0d3b51dbed6b31966cc4 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Tue, 10 Jun 2025 23:00:09 +0100
Subject: [PATCH 07/49] Fix room ID check

---
 src/core/matrix/state_res/event_auth.rs               | 11 +++++++----
 src/service/rooms/event_handler/handle_outlier_pdu.rs |  5 +----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 31c660ed..de4d20e1 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -30,7 +30,7 @@ use super::{
 	},
 	room_version::RoomVersion,
 };
-use crate::{debug, err_log, error, trace, warn};
+use crate::{debug, error, trace, warn};

 // FIXME: field extracting could be bundled for `content`
 #[derive(Deserialize)]
@@ -251,7 +251,7 @@ where
 	let room_create_event = match room_create_event {
 		| None => {
-			error!("no m.room.create event in auth chain for {}!", incoming_event.event_id());
+			error!("no m.room.create event found for {}!", incoming_event.event_id());
 			return Ok(false);
 		},
 		| Some(e) => e,
@@ -262,8 +262,11 @@ where
 		return Ok(false);
 	};

-	if room_id_server_name != sender.server_name() {
-		warn!("servername of room ID does not match servername of m.room.create sender");
+	if room_id_server_name != room_create_event.sender().server_name() {
+		warn!(
+			"servername of room ID origin ({}) does not match servername of m.room.create sender ({})",
+			room_id_server_name,
+			room_create_event.sender().server_name());
 		return Ok(false);
 	}

diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs
index d79eed77..fad9ac74 100644
--- a/src/service/rooms/event_handler/handle_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs
@@ -122,10 +122,7 @@ where
 	}

 	// The original create event must be in the auth events
-	if !matches!(
-		auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
-		Some(_) | None
-	) {
+	if !auth_events.contains_key(&(StateEventType::RoomCreate, String::new().into())) {
 		return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
 	}

From 5dafe80527ba3c26775985eb328e3621f5957e3a Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:27:25 +0100
Subject: [PATCH 08/49] more logs

---
 src/core/matrix/state_res/event_auth.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index de4d20e1..fc1119de 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -13,6 +13,7 @@ use ruma::{
 		power_levels::RoomPowerLevelsEventContent,
 		third_party_invite::RoomThirdPartyInviteEventContent,
 	},
+	EventId,
 	int,
 	serde::{Base64, Raw},
 };
@@ -21,7 +22,6 @@ use serde::{
 	de::{Error as _, IgnoredAny},
 };
 use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue};
-
 use super::{
 	Error, Event, Result, StateEventType, StateKey, TimelineEventType,
 	power_levels::{
@@ -251,7 +251,14 @@ where
 	let room_create_event = match room_create_event {
 		| None => {
-			error!("no m.room.create event found for {}!", incoming_event.event_id());
+			error!(
+				create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				"no m.room.create event found for {} ({})!",
+				incoming_event.event_id().as_str(),
+				incoming_event.room_id().as_str()
+			);
 			return Ok(false);
 		},
 		| Some(e) => e,

From 59042ed096cc341342c31c9c53c164b76ec7ca2f Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:42:19 +0100
Subject: [PATCH 09/49] log which room struggled to get mainline depth

---
 src/core/matrix/state_res/mod.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs
index e721e14c..ba9c013d 100644
--- a/src/core/matrix/state_res/mod.rs
+++ b/src/core/matrix/state_res/mod.rs
@@ -733,8 +733,12 @@ where
 	Fut: Future> + Send,
 	E: Event + Send + Sync,
 {
+	let mut room_id = None;
 	while let Some(sort_ev) = event {
 		debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+		if room_id.is_none() {
+			room_id = Some(sort_ev.room_id().to_owned());
+		}

 		let id = sort_ev.event_id();
 		if let Some(depth) = mainline_map.get(id) {
@@ -753,7 +757,7 @@ where
 			}
 		}
 	}
-	warn!("could not find a power event in the mainline map, defaulting to zero depth");
+	warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth");

 	Ok(0)
 }

From 13bd3edbca34c1fcf5282fce947696ee22f8bb95 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:02:49 -0700
Subject: [PATCH 10/49] change rocksdb stats level to 3

scale rocksdb background jobs and subcompactions
change rocksdb default error level to info from error
delete unused num_threads function
fix warns from cargo
---
 conduwuit-example.toml         |  2 +-
 src/core/config/mod.rs         |  6 +++---
 src/database/engine/db_opts.rs | 22 ++++------------------
 3 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 2c3721d0..a04819df 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -1041,7 +1041,7 @@
 # 3 to 5 = Statistics with possible performance impact.
 # 6 = All statistics.
 #
-#rocksdb_stats_level = 1
+#rocksdb_stats_level = 3

 # This is a password that can be configured that will let you login to the
 # server bot account (currently `@conduit`) for emergency troubleshooting

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 17da6492..f6f2cfc3 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1207,7 +1207,7 @@ pub struct Config {
 	/// 3 to 5 = Statistics with possible performance impact.
 	/// 6 = All statistics.
 	///
-	/// default: 1
+	/// default: 3
 	#[serde(default = "default_rocksdb_stats_level")]
 	pub rocksdb_stats_level: u8,

@@ -2265,7 +2265,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 }

 fn default_rocksdb_recovery_mode() -> u8 { 1 }

-fn default_rocksdb_log_level() -> String { "error".to_owned() }
+fn default_rocksdb_log_level() -> String { "info".to_owned() }

 fn default_rocksdb_log_time_to_roll() -> usize { 0 }

@@ -2297,7 +2297,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 }
 #[allow(clippy::doc_markdown)]
 fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 }

-fn default_rocksdb_stats_level() -> u8 { 1 }
+fn default_rocksdb_stats_level() -> u8 { 3 }

 // I know, it's a great name
 #[must_use]

diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs
index 18cec742..1299443d 100644
--- a/src/database/engine/db_opts.rs
+++ b/src/database/engine/db_opts.rs
@@ -1,8 +1,6 @@
-use std::{cmp, convert::TryFrom};
-
-use conduwuit::{Config, Result, utils};
+use conduwuit::{Config, Result};
 use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
-
+use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32};
 use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
 /// Create database-wide options suitable for opening the database. This also
@@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
 	set_logging_defaults(&mut opts, config);

 	// Processing
-	opts.set_max_background_jobs(num_threads::(config)?);
-	opts.set_max_subcompactions(num_threads::(config)?);
+	opts.set_max_background_jobs(parallelism_scaled_i32(1));
+	opts.set_max_subcompactions(parallelism_scaled_u32(1));
 	opts.set_avoid_unnecessary_blocking_io(true);
 	opts.set_max_file_opening_threads(0);

@@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
 		opts.set_callback_logger(rocksdb_log_level, &handle_log);
 	}
 }
-
-fn num_threads>(config: &Config) -> Result {
-	const MIN_PARALLELISM: usize = 2;
-
-	let requested = if config.rocksdb_parallelism_threads != 0 {
-		config.rocksdb_parallelism_threads
-	} else {
-		utils::available_parallelism()
-	};
-
-	utils::math::try_into::(cmp::max(MIN_PARALLELISM, requested))
-}

From cb275b910bb5798f6de796e6761bd3b205a7c5c8 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 18 Jun 2025 12:48:27 -0700
Subject: [PATCH 11/49] make fetching key room events less smart

---
 src/core/matrix/state_res/event_auth.rs | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index fc1119de..ec70d684 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -242,12 +242,16 @@ where
 	}
 	*/

-	let (room_create_event, power_levels_event, sender_member_event) = join3(
-		fetch_state(&StateEventType::RoomCreate, ""),
-		fetch_state(&StateEventType::RoomPowerLevels, ""),
-		fetch_state(&StateEventType::RoomMember, sender.as_str()),
-	)
-	.await;
+	// let (room_create_event, power_levels_event, sender_member_event) = join3(
+	// 	fetch_state(&StateEventType::RoomCreate, ""),
+	// 	fetch_state(&StateEventType::RoomPowerLevels, ""),
+	// 	fetch_state(&StateEventType::RoomMember, sender.as_str()),
+	// )
+	// .await;
+
+	let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
+	let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
+	let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;

 	let room_create_event = match room_create_event {
 		| None => {

From 2cd966db33a97af465e3bef55de6c7ea10d68890 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:39:10 -0700
Subject: [PATCH 12/49] lock the getter instead ???
c/o M
---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 +-
 src/service/rooms/state/mod.rs                         | 1 +
 src/service/rooms/timeline/create.rs                   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 4093cb05..05f88849 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -149,7 +149,7 @@ where
 	let extremities: Vec<_> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, &state_lock)
 		.map(ToOwned::to_owned)
 		.ready_filter(|event_id| {
 			// Remove any that are referenced by this incoming event's prev_events

diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 641aa6a9..92881126 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -388,6 +388,7 @@ impl Service {
 	pub fn get_forward_extremities<'a>(
 		&'a self,
 		room_id: &'a RoomId,
+		_state_lock: &'a RoomMutexGuard,
 	) -> impl Stream + Send + '_ {
 		let prefix = (room_id, Interfix);

diff --git a/src/service/rooms/timeline/create.rs b/src/service/rooms/timeline/create.rs
index 20ccaf56..1be2f58b 100644
--- a/src/service/rooms/timeline/create.rs
+++ b/src/service/rooms/timeline/create.rs
@@ -42,7 +42,7 @@ pub async fn create_hash_and_sign_event(
 	let prev_events: Vec = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, _mutex_lock)
 		.take(20)
 		.map(Into::into)
 		.collect()

From 2cd966db33a97af465e3bef55de6c7ea10d68890 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:44:27 -0700
Subject: [PATCH 13/49] vehicle loan documentation now available at window 7

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 05f88849..bc2408df 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -6,6 +6,7 @@ use conduwuit::{
 	trace,
 	utils::stream::{BroadbandExt, ReadyExt},
 	warn,
+	info
 };
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@@ -167,6 +168,8 @@ where
 		.collect()
 		.await;

+	if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
+
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
 		extremities.len(),

From ac3f2e9e9d1844e50bee974c689945242365abdb Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:13:30 -0700
Subject: [PATCH 14/49] sender_workers scaling. this time, with feeling!

---
 src/core/config/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index f6f2cfc3..ad0dce52 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1889,9 +1889,9 @@ pub struct Config {
 	pub stream_amplification: usize,

 	/// Number of sender task workers; determines sender parallelism. Default is
-	/// '4'. Override by setting a different value. Values clamped 1 to core count.
+	/// core count. Override by setting a different value.
 	///
-	/// default: 4
+	/// default: core count
 	#[serde(default = "default_sender_workers")]
 	pub sender_workers: usize,

@@ -2378,7 +2378,7 @@ fn default_stream_width_scale() -> f32 { 1.0 }

 fn default_stream_amplification() -> usize { 1024 }

-fn default_sender_workers() -> usize { 4 }
+fn default_sender_workers() -> usize { parallelism_scaled(1) }

 fn default_client_receive_timeout() -> u64 { 75 }

From e76753b113483c3c3fda899af2f0def485437313 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 30 Jun 2025 15:25:11 -0700
Subject: [PATCH 15/49] more funny settings (part 3 of 12)

---
 src/core/config/mod.rs | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index ad0dce52..bd44f9ff 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -2125,40 +2125,41 @@ fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(
 fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }

-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) }

 fn default_cache_capacity_modifier() -> f64 { 1.0 }

 fn default_auth_chain_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(500_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_shorteventid_cache_capacity() -> u32 {
-	parallelism_scaled_u32(100_000).saturating_add(500_000)
+	parallelism_scaled_u32(100_000).saturating_add(100_000)
 }

 fn default_eventidshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(100_000).saturating_add(500_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_eventid_pdu_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(500_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_shortstatekey_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(500_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_statekeyshort_cache_capacity() -> u32 {
-	parallelism_scaled_u32(50_000).saturating_add(500_000)
+	parallelism_scaled_u32(50_000).saturating_add(100_000)
 }

 fn default_servernameevent_data_cache_capacity() -> u32 {
-	parallelism_scaled_u32(200_000).saturating_add(500_000)
+	parallelism_scaled_u32(100_000).saturating_add(100_000)
 }

 fn default_stateinfo_cache_capacity() -> u32 {
-	parallelism_scaled_u32(500).clamp(100, 12000) }
+	parallelism_scaled_u32(500).clamp(100, 12000)
+}

 fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
 	parallelism_scaled_u32(500).clamp(100, 12000) }

From 62bdfe1ce8e6400e3ac84a46eb1637a946c8f9d7 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Thu, 17 Jul 2025 23:15:14 +0100
Subject: [PATCH 16/49] feat(space-upgrades): Copy over space child & parent
 states

---
 src/api/client/room/upgrade.rs | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index ae632235..afd6f70e 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -24,7 +24,7 @@ use serde_json::{json, value::to_raw_value};
 use crate::Ruma;

 /// Recommended transferable state events list from the spec
-const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[
+const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 11] = &[
 	StateEventType::RoomAvatar,
 	StateEventType::RoomEncryption,
 	StateEventType::RoomGuestAccess,
@@ -34,6 +34,9 @@ const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 9] = &[
 	StateEventType::RoomPowerLevels,
 	StateEventType::RoomServerAcl,
 	StateEventType::RoomTopic,
+	// Not explicitly recommended in spec, but very useful.
+	StateEventType::SpaceChild,
+	StateEventType::SpaceParent, // TODO: m.room.policy
 ];

 /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
@@ -50,10 +53,7 @@ pub(crate) async fn upgrade_room_route(
 	State(services): State,
 	body: Ruma,
 ) -> Result {
-	debug_assert!(
-		TRANSFERABLE_STATE_EVENTS.is_sorted(),
-		"TRANSFERABLE_STATE_EVENTS is not sorted"
-	);
+	// TODO[v12]: Handle additional creators
 	let sender_user = body.sender_user.as_ref().expect("user is authenticated");

 	if !services.server.supported_room_version(&body.new_version) {

From b2883c3d6e43f736cc70959443cacd44c6a1b9c0 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:08:21 +0100
Subject: [PATCH 17/49] feat(space-upgrades): Update parent spaces in upgrade

This relies on the room being upgraded referencing the space itself, but
there isn't an easy way to do it otherwise.
---
 src/api/client/room/upgrade.rs               | 78 ++++++++++++++++++-
 .../rooms/state_accessor/room_state.rs       | 19 +++++
 src/service/rooms/state_accessor/user_can.rs | 32 +++++++-
 3 files changed, 124 insertions(+), 5 deletions(-)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index afd6f70e..3a0ed010 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -5,7 +5,7 @@ use conduwuit::{
 	Err, Error, Event, Result, err, info,
 	matrix::{StateKey, pdu::PduBuilder},
 };
-use futures::StreamExt;
+use futures::{FutureExt, StreamExt};
 use ruma::{
 	CanonicalJsonObject, RoomId, RoomVersionId,
 	api::client::{error::ErrorKind, room::upgrade_room},
@@ -16,12 +16,13 @@ use ruma::{
 			power_levels::RoomPowerLevelsEventContent,
 			tombstone::RoomTombstoneEventContent,
 		},
+		space::child::{RedactedSpaceChildEventContent, SpaceChildEventContent},
 	},
 	int,
 };
 use serde_json::{json, value::to_raw_value};

-use crate::Ruma;
+use crate::router::Ruma;

 /// Recommended transferable state events list from the spec
 const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 11] = &[
@@ -36,7 +37,7 @@ const TRANSFERABLE_STATE_EVENTS: &[StateEventType; 11] = &[
 	StateEventType::RoomTopic,
 	// Not explicitly recommended in spec, but very useful.
 	StateEventType::SpaceChild,
-	StateEventType::SpaceParent, // TODO: m.room.policy
+	StateEventType::SpaceParent, // TODO: m.room.policy?
 ];

 /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade`
@@ -128,7 +129,7 @@ pub(crate) async fn upgrade_room_route(
 			);
 		},
 		| _ => {
-			// "creator" key no longer exists in V11+ rooms
+			// "creator" key no longer exists in V11 rooms
 			create_event_content.remove("creator");
 		},
 	}
@@ -175,6 +176,7 @@
 		&replacement_room,
 		&state_lock,
 	)
+	.boxed()
 	.await?;

 	// Join the new room
@@ -205,6 +207,7 @@
 		&replacement_room,
 		&state_lock,
 	)
+	.boxed()
 	.await?;

 	// Replicate transferable state events to the new room
@@ -233,6 +236,7 @@
 			&replacement_room,
 			&state_lock,
 		)
+		.boxed()
 		.await?;
 	}

@@ -290,10 +294,76 @@
 		&body.room_id,
 		&state_lock,
 	)
+	.boxed()
 	.await?;

 	drop(state_lock);

+	// Check if the old room has a space parent, and if so, whether we should update
+	// it (m.space.parent, room_id)
+	let parents = services
+		.rooms
+		.state_accessor
+		.room_state_keys(&body.room_id, &StateEventType::SpaceParent)
+		.await?;
+
+	for raw_space_id in parents {
+		let space_id = RoomId::parse(&raw_space_id)?;
+		let state_key = StateKey::from(raw_space_id.clone());
+		let Ok(child) = services
+			.rooms
+			.state_accessor
+			.room_state_get_content::<SpaceChildEventContent>(
+				space_id,
+				&StateEventType::SpaceChild,
+				body.room_id.as_str(),
+			)
+			.await
+		else {
+			// If the space does not have a child event for this room, we can skip it
+			continue;
+		};
+		// First, drop the space's child event
+		let state_lock = services.rooms.state.mutex.lock(space_id).await;
+		services
+			.rooms
+			.timeline
+			.build_and_append_pdu(
+				PduBuilder {
+					event_type: StateEventType::SpaceChild.into(),
+					content: to_raw_value(&RedactedSpaceChildEventContent {})
+						.expect("event is valid, we just created it"),
+					state_key: Some(state_key),
+					..Default::default()
+				},
+				sender_user,
+				space_id,
+				&state_lock,
+			)
+			.boxed()
+			.await
+			.ok();
+		// Now, add a new child event for the replacement room
+		services
+			.rooms
+			.timeline
+			.build_and_append_pdu(
+				PduBuilder {
+					event_type: StateEventType::SpaceChild.into(),
+					content: to_raw_value(&child).expect("event is valid, we just created it"),
+					state_key: Some(StateKey::new()),
+					..Default::default()
+				},
+				sender_user,
+				space_id,
+				&state_lock,
+			)
+			.boxed()
+			.await
+			.ok();
+		drop(state_lock);
+	}
+
 	// Return the replacement room id
 	Ok(upgrade_room::v3::Response { replacement_room })
 }

diff --git a/src/service/rooms/state_accessor/room_state.rs b/src/service/rooms/state_accessor/room_state.rs
index 89a66f0c..b5306485 100644
--- a/src/service/rooms/state_accessor/room_state.rs
+++ b/src/service/rooms/state_accessor/room_state.rs
@@ -91,3 +91,22 @@ pub async fn room_state_get(
 		.and_then(|shortstatehash| self.state_get(shortstatehash, event_type, state_key))
 		.await
 }
+
+/// Returns all state keys for the given `room_id` and `event_type`.
+#[implement(super::Service)]
+#[tracing::instrument(skip(self), level = "debug")]
+pub async fn room_state_keys(
+	&self,
+	room_id: &RoomId,
+	event_type: &StateEventType,
+) -> Result<Vec<String>> {
+	let shortstatehash = self.services.state.get_room_shortstatehash(room_id).await?;
+
+	let state_keys: Vec<String> = self
+		.state_keys(shortstatehash, event_type)
+		.map(|state_key| state_key.to_string())
+		.collect()
+		.await;
+
+	Ok(state_keys)
+}

diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs
index 221263a8..5bbed173 100644
--- a/src/service/rooms/state_accessor/user_can.rs
+++ b/src/service/rooms/state_accessor/user_can.rs
@@ -1,6 +1,6 @@
 use conduwuit::{Err, Result, implement, matrix::Event, pdu::PduBuilder};
 use ruma::{
-	EventId, RoomId, UserId,
+	EventId, Int, RoomId, UserId,
 	events::{
 		StateEventType, TimelineEventType,
 		room::{
@@ -167,3 +167,33 @@ pub async fn user_can_invite(
 		.await
 		.is_ok()
 }
+
+#[implement(super::Service)]
+pub async fn current_power_levels(
+	&self,
+	room_id: &RoomId,
+) -> Result<RoomPowerLevelsEventContent> {
+	// fetches the current power levels event content for a room, returning the
+	// default power levels if no power levels event is found
+	let pl_event_content = self
+		.room_state_get_content::<RoomPowerLevelsEventContent>(
+			room_id,
+			&StateEventType::RoomPowerLevels,
+			"",
+		)
+		.await;
+	if let Ok(pl_event_content) = pl_event_content {
+		Ok(pl_event_content)
+	} else {
+		let mut default_power_levels = RoomPowerLevelsEventContent::default();
+
+		// set the creator as PL100
+		let create_event = self
+			.room_state_get(room_id, &StateEventType::RoomCreate, "")
+			.await?;
+		default_power_levels
+			.users
+			.insert(create_event.sender().to_owned(), Int::from(100));
+		Ok(default_power_levels)
+	}
+}

From 3b5335630dacd8e85bfb2eba4e127f84eda1ce56 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:17:27 +0100
Subject: [PATCH 18/49] feat(space-upgrades): Transfer all state keys during
 upgrade

Before this change, only state events with an empty state key would be
cloned. This allows m.space.child to be cloned appropriately.
---
 src/api/client/room/upgrade.rs | 53 +++++++++++++++++++---------------
 1 file changed, 30 insertions(+), 23 deletions(-)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index 3a0ed010..0d6d2805 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -212,32 +212,39 @@ pub(crate) async fn upgrade_room_route(
 	// Replicate transferable state events to the new room
 	for event_type in TRANSFERABLE_STATE_EVENTS {
-		let event_content = match services
+		let state_keys = services
 			.rooms
 			.state_accessor
-			.room_state_get(&body.room_id, event_type, "")
-			.await
-		{
-			| Ok(v) => v.content().to_owned(),
-			| Err(_) => continue, // Skipping missing events.
-		};
-
-		services
-			.rooms
-			.timeline
-			.build_and_append_pdu(
-				PduBuilder {
-					event_type: event_type.to_string().into(),
-					content: event_content,
-					state_key: Some(StateKey::new()),
-					..Default::default()
-				},
-				sender_user,
-				&replacement_room,
-				&state_lock,
-			)
-			.boxed()
+			.room_state_keys(&body.room_id, event_type)
 			.await?;
+		for state_key in state_keys {
+			let event_content = match services
+				.rooms
+				.state_accessor
+				.room_state_get(&body.room_id, event_type, &state_key)
+				.await
+			{
+				| Ok(v) => v.content().to_owned(),
+				| Err(_) => continue, // Skipping missing events.
+			};
+
+			services
+				.rooms
+				.timeline
+				.build_and_append_pdu(
+					PduBuilder {
+						event_type: event_type.to_string().into(),
+						content: event_content,
+						state_key: Some(StateKey::from(state_key)),
+						..Default::default()
+					},
+					sender_user,
+					&replacement_room,
+					&state_lock,
+				)
+				.boxed()
+				.await?;
+		}
 	}

 	// Moves any local aliases to the new room

From f063814d9457a5600536d78938b42860a89c6c0e Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:22:50 +0100
Subject: [PATCH 19/49] fix(space-upgrades): Incorrectly updated parent
 children events

---
 src/api/client/room/upgrade.rs | 17 +++++++++++++----
 1 file changed, 13 insertions(+), 4 deletions(-)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index 0d6d2805..0109715b 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -2,7 +2,7 @@ use std::cmp::max;

 use axum::extract::State;
 use conduwuit::{
-	Err, Error, Event, Result, err, info,
+	Err, Error, Event, Result, debug, err, info,
 	matrix::{StateKey, pdu::PduBuilder},
 };
 use futures::{FutureExt, StreamExt};
@@ -316,7 +316,6 @@
 	for raw_space_id in parents {
 		let space_id = RoomId::parse(&raw_space_id)?;
-		let state_key = StateKey::from(raw_space_id.clone());
 		let Ok(child) = services
 			.rooms
 			.state_accessor
@@ -330,8 +329,13 @@
 			// If the space does not have a child event for this room, we can skip it
 			continue;
 		};
+		debug!(
+			"Updating space {space_id} child event for room {} to {replacement_room}",
+			&body.room_id
+		);
 		// First, drop the space's child event
 		let state_lock = services.rooms.state.mutex.lock(space_id).await;
+		debug!("Removing space child event for room {} in space {space_id}", &body.room_id);
 		services
 			.rooms
 			.timeline
@@ -340,7 +344,7 @@
 					event_type: StateEventType::SpaceChild.into(),
 					content: to_raw_value(&RedactedSpaceChildEventContent {})
 						.expect("event is valid, we just created it"),
-					state_key: Some(state_key),
+					state_key: Some(body.room_id.clone().as_str().into()),
 					..Default::default()
 				},
 				sender_user,
@@ -351,6 +355,7 @@
 			.await
 			.ok();
 		// Now, add a new child event for the replacement room
+		debug!("Adding space child event for room {replacement_room} in space {space_id}");
 		services
 			.rooms
 			.timeline
@@ -358,7 +363,7 @@
 				PduBuilder {
 					event_type: StateEventType::SpaceChild.into(),
 					content: to_raw_value(&child).expect("event is valid, we just created it"),
-					state_key: Some(StateKey::new()),
+					state_key: Some(replacement_room.as_str().into()),
 					..Default::default()
 				},
 				sender_user,
@@ -368,6 +373,10 @@
 			.boxed()
 			.await
 			.ok();
+		debug!(
+			"Finished updating space {space_id} child event for room {} to {replacement_room}",
+			&body.room_id
+		);
 		drop(state_lock);
 	}

From 57868a008c9e1dc5ad9c86865b0e61421ac5670c Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:41:36 +0100
Subject: [PATCH 20/49] feat(space-upgrades): Skip empty state events in room
 upgrade

---
 src/api/client/room/upgrade.rs | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index 0109715b..02495902 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -227,6 +227,10 @@ pub(crate) async fn upgrade_room_route(
 				| Ok(v) => v.content().to_owned(),
 				| Err(_) => continue, // Skipping missing events.
 			};
+			if event_content.get() == "{}" {
+				// If the event content is empty, we skip it
+				continue;
+			}

 			services
 				.rooms

From b2b18002ea28858ec9f4193c2302ec5fbce19638 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:44:49 +0100
Subject: [PATCH 21/49] fix(space-upgrades): Remove unused helper function

---
 src/service/rooms/state_accessor/user_can.rs | 30 --------------------
 1 file changed, 30 deletions(-)

diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs
index 5bbed173..f7f3377c 100644
--- a/src/service/rooms/state_accessor/user_can.rs
+++ b/src/service/rooms/state_accessor/user_can.rs
@@ -167,33 +167,3 @@ pub async fn user_can_invite(
 		.await
 		.is_ok()
 }
-
-#[implement(super::Service)]
-pub async fn current_power_levels(
-	&self,
-	room_id: &RoomId,
-) -> Result<RoomPowerLevelsEventContent> {
-	// fetches the current power levels event content for a room, returning the
-	// default power levels if no power levels event is found
-	let pl_event_content = self
-		.room_state_get_content::<RoomPowerLevelsEventContent>(
-			room_id,
-			&StateEventType::RoomPowerLevels,
-			"",
-		)
-		.await;
-	if let Ok(pl_event_content) = pl_event_content {
-		Ok(pl_event_content)
-	} else {
-		let mut default_power_levels = RoomPowerLevelsEventContent::default();
-
-		// set the creator as PL100
-		let create_event = self
-			.room_state_get(room_id, &StateEventType::RoomCreate, "")
-			.await?;
-		default_power_levels
-			.users
-			.insert(create_event.sender().to_owned(), Int::from(100));
-		Ok(default_power_levels)
-	}
-}

From 331832616f346050201d9949c9ee4493b8b1ef65 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:51:03 +0100
Subject: [PATCH 22/49] feat(space-upgrades): MSC4168: Override space child
 vias

---
 src/api/client/room/upgrade.rs | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/src/api/client/room/upgrade.rs b/src/api/client/room/upgrade.rs
index 02495902..c2c3aa81 100644
--- a/src/api/client/room/upgrade.rs
+++ b/src/api/client/room/upgrade.rs
@@ -366,7 +366,12 @@ pub(crate) async fn upgrade_room_route(
 			.build_and_append_pdu(
 				PduBuilder {
 					event_type: StateEventType::SpaceChild.into(),
-					content: to_raw_value(&child).expect("event is valid, we just created it"),
+					content: to_raw_value(&SpaceChildEventContent {
+						via: vec![sender_user.server_name().to_owned()],
+						order: child.order,
+						suggested: child.suggested,
+					})
+					.expect("event is valid, we just created it"),
 					state_key: Some(replacement_room.as_str().into()),
 					..Default::default()
 				},

From c639228f4ddc14f73fb3bbcc1316ef9dbe024408 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 19 Jul 2025 15:57:53 +0100
Subject: [PATCH 23/49] style(space-upgrades): Remove unused import left over
 from 6691b7672b

---
 src/service/rooms/state_accessor/user_can.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/service/rooms/state_accessor/user_can.rs b/src/service/rooms/state_accessor/user_can.rs
index f7f3377c..221263a8 100644
--- a/src/service/rooms/state_accessor/user_can.rs
+++ b/src/service/rooms/state_accessor/user_can.rs
@@ -1,6 +1,6 @@
 use conduwuit::{Err, Result, implement, matrix::Event, pdu::PduBuilder};
 use ruma::{
-	EventId, Int, RoomId, UserId,
+	EventId, RoomId, UserId,
 	events::{
 		StateEventType, TimelineEventType,
 		room::{

From f513cb75989bf48610784f26a2b73781e306dfbe Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Sat, 19 Jul 2025 20:31:54 +0100
Subject: [PATCH 24/49] chore: Remove false positives in typo checks

---
 .typos.toml | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)
diff --git a/.typos.toml b/.typos.toml
index 41c81085..63c4670d 100644
--- a/.typos.toml
+++ b/.typos.toml
@@ -1,5 +1,19 @@
 [files]
-extend-exclude = ["*.csr"]
+extend-exclude = ["*.csr", "*.lock", "pnpm-lock.yaml"]
+
+[default]
+
+extend-ignore-re = [
+	"(?Rm)^.*(#|//|)$", # Ignore a line by making it trail with a `spellchecker:disable-line` comment
+	"^[0-9a-f]{7,}$", # Commit hashes
+
+	# some heuristics for base64 strings
+	"[A-Za-z0-9+=]{72,}",
+	"([A-Za-z0-9+=]|\\\\\\s\\*){72,}",
+	"[0-9+][A-Za-z0-9+]{30,}[a-z0-9+]",
+	"\\$[A-Z0-9+][A-Za-z0-9+]{6,}[a-z0-9+]",
+	"\\b[a-z0-9+/=][A-Za-z0-9+/=]{7,}[a-z0-9+/=][A-Z]\\b",
+]

 [default.extend-words]
 "allocatedp" = "allocatedp"

From 9051ce63f7c419f2eb14a1cfcfb849bf831723db Mon Sep 17 00:00:00 2001
From: rooot
Date: Sun, 20 Jul 2025 03:14:35 +0200
Subject: [PATCH 25/49] feat(config): introduce federation connection timeout
 setting

fixes #906

Signed-off-by: rooot
---
 conduwuit-example.toml    |  8 ++++++++
 src/core/config/mod.rs    | 12 ++++++++++++
 src/service/client/mod.rs |  3 +++
 3 files changed, 23 insertions(+)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index bdc2f570..3b7bbbb8 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -325,6 +325,14 @@
 #
 #well_known_timeout = 10

+# Federation client connection timeout (seconds). You should not set this
+# too high, as dead homeservers can significantly slow down
+# federation, specifically key retrieval, which will take roughly the
+# amount of time you configure here given that a homeserver doesn't
+# respond.
+#
+#federation_conn_timeout = 10
+
 # Federation client request timeout (seconds). You most definitely want
 # this to be high to account for extremely large room joins, slow
 # homeservers, your own resources etc.

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index d93acd9b..515409be 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -412,6 +412,16 @@ pub struct Config {
 	#[serde(default = "default_well_known_timeout")]
 	pub well_known_timeout: u64,

+	/// Federation client connection timeout (seconds). You should not set this
+	/// too high, as dead homeservers can significantly slow down
+	/// federation, specifically key retrieval, which will take roughly the
+	/// amount of time you configure here given that a homeserver doesn't
+	/// respond.
+	///
+	/// default: 10
+	#[serde(default = "default_federation_conn_timeout")]
+	pub federation_conn_timeout: u64,
+
 	/// Federation client request timeout (seconds). You most definitely want
 	/// this to be high to account for extremely large room joins, slow
 	/// homeservers, your own resources etc.
@@ -2193,6 +2203,8 @@ fn default_well_known_conn_timeout() -> u64 { 6 }

 fn default_well_known_timeout() -> u64 { 10 }

+fn default_federation_conn_timeout() -> u64 { 10 }
+
 fn default_federation_timeout() -> u64 { 25 }

 fn default_federation_idle_timeout() -> u64 { 25 }

diff --git a/src/service/client/mod.rs b/src/service/client/mod.rs
index 1aeeb492..239340ba 100644
--- a/src/service/client/mod.rs
+++ b/src/service/client/mod.rs
@@ -66,6 +66,7 @@ impl crate::Service for Service {
 			federation: base(config)?
 				.dns_resolver(resolver.resolver.hooked.clone())
+				.connect_timeout(Duration::from_secs(config.federation_conn_timeout))
 				.read_timeout(Duration::from_secs(config.federation_timeout))
 				.pool_max_idle_per_host(config.federation_idle_per_host.into())
 				.pool_idle_timeout(Duration::from_secs(config.federation_idle_timeout))
@@ -74,6 +75,7 @@ impl crate::Service for Service {
 			synapse: base(config)?
 				.dns_resolver(resolver.resolver.hooked.clone())
+				.connect_timeout(Duration::from_secs(config.federation_conn_timeout))
 				.read_timeout(Duration::from_secs(305))
 				.pool_max_idle_per_host(0)
 				.redirect(redirect::Policy::limited(3))
@@ -81,6 +83,7 @@ impl crate::Service for Service {
 			sender: base(config)?
 				.dns_resolver(resolver.resolver.hooked.clone())
+				.connect_timeout(Duration::from_secs(config.federation_conn_timeout))
 				.read_timeout(Duration::from_secs(config.sender_timeout))
 				.timeout(Duration::from_secs(config.sender_timeout))
 				.pool_max_idle_per_host(1)

From 0631094350bd07b35fbbc7aa9b70a0eb74cd3b28 Mon Sep 17 00:00:00 2001
From: rooot
Date: Sun, 20 Jul 2025 16:46:18 +0200
Subject: [PATCH 26/49] docs(config): warn about federation key query timeout
 caveat

Signed-off-by: rooot
---
 conduwuit-example.toml | 3 ++-
 src/core/config/mod.rs | 3 ++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 3b7bbbb8..2fab9cdf 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -329,7 +329,8 @@
 # too high, as dead homeservers can significantly slow down
 # federation, specifically key retrieval, which will take roughly the
 # amount of time you configure here given that a homeserver doesn't
-# respond.
+# respond. This will cause most clients to time out /keys/query, causing
+# E2EE and device verification to fail.
 #
 #federation_conn_timeout = 10

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 515409be..909462db 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -416,7 +416,8 @@ pub struct Config {
 	/// too high, as dead homeservers can significantly slow down
 	/// federation, specifically key retrieval, which will take roughly the
 	/// amount of time you configure here given that a homeserver doesn't
-	/// respond.
+	/// respond. This will cause most clients to time out /keys/query, causing
+	/// E2EE and device verification to fail.
 	///
 	/// default: 10
 	#[serde(default = "default_federation_conn_timeout")]

From 30a8c06fd9caa4276a4261a107fc84414e36ce6c Mon Sep 17 00:00:00 2001
From: Jade Ellis
Date: Sat, 19 Jul 2025 20:36:27 +0100
Subject: [PATCH 27/49] refactor: Replace std Mutex with parking_lot

---
 Cargo.lock                    |  1 +
 Cargo.toml                    |  7 +++++++
 src/admin/processor.rs        | 16 +++++-----------
 src/core/Cargo.toml           |  1 +
 src/core/info/rustc.rs        | 10 +++-------
 src/core/log/capture/layer.rs |  2 +-
 src/core/log/capture/mod.rs   |  8 +++++---
 src/core/log/capture/util.rs  | 12 ++++++------
 src/core/log/reload.rs        | 16 ++++------------
 src/core/mod.rs               |  1 +
 src/core/utils/mutex_map.rs   | 25 ++++++++-----------------
 src/macros/rustc.rs           |  4 ++--
 12 files changed, 44 insertions(+), 59 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 6f711007..b084f72a 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -967,6 +967,7 @@ dependencies = [
 "maplit",
 "nix",
 "num-traits",
+ "parking_lot",
 "rand 0.8.5",
 "regex",
 "reqwest",

diff --git a/Cargo.toml b/Cargo.toml
index ef917332..3e52c4b2 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -515,6 +515,13 @@ version = "1.0"
 [workspace.dependencies.proc-macro2]
 version = "1.0"

+[workspace.dependencies.parking_lot]
+version = "0.12.4"
+
+# Use this when extending with_lock::WithLock to parking_lot
+# [workspace.dependencies.lock_api]
+# version = "0.4.13"
+
 [workspace.dependencies.bytesize]
 version = "2.0"

diff --git a/src/admin/processor.rs b/src/admin/processor.rs
index e80000c1..2c91efe1 100644
--- a/src/admin/processor.rs
+++ b/src/admin/processor.rs
@@ -1,14 +1,8 @@
-use std::{
-	fmt::Write,
-	mem::take,
-	panic::AssertUnwindSafe,
-	sync::{Arc, Mutex},
-	time::SystemTime,
-};
+use std::{fmt::Write, mem::take, panic::AssertUnwindSafe, sync::Arc, time::SystemTime};

 use clap::{CommandFactory, Parser};
 use conduwuit::{
-	Error, Result, debug, error,
+	Error, Result, SyncMutex, debug, error,
 	log::{
 		capture,
 		capture::Capture,
@@ -123,7 +117,7 @@ async fn process(
 	let mut output = String::new();

 	// Prepend the logs only if any were captured
-	let logs = logs.lock().expect("locked");
+	let logs = logs.lock();
 	if logs.lines().count() > 2 {
 		writeln!(&mut output, "{logs}").expect("failed to format logs to command output");
 	}
@@ -132,7 +126,7 @@ async fn process(
 	(result, output)
 }

-fn capture_create(context: &Context<'_>) -> (Arc, Arc>) {
+fn capture_create(context: &Context<'_>) -> (Arc, Arc>) {
 	let env_config = &context.services.server.config.admin_log_capture;
 	let env_filter = EnvFilter::try_new(env_config).unwrap_or_else(|e| {
 		warn!("admin_log_capture filter invalid: {e:?}");
@@ -152,7 +146,7 @@
 		data.level() <= log_level && data.our_modules() && data.scope.contains(&"admin")
 	};

-	let logs = Arc::new(Mutex::new(
+	let logs = Arc::new(SyncMutex::new(
 		collect_stream(|s| markdown_table_head(s)).expect("markdown table header"),
 	));

diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml
index 0c33c590..7a3721d6 100644
--- a/src/core/Cargo.toml
+++ b/src/core/Cargo.toml
@@ -110,6 +110,7 @@ tracing-core.workspace = true
 tracing-subscriber.workspace = true
 tracing.workspace = true
 url.workspace = true
+parking_lot.workspace = true

 [target.'cfg(unix)'.dependencies]
 nix.workspace = true

diff --git a/src/core/info/rustc.rs b/src/core/info/rustc.rs
index 048c0cd5..60156301 100644
--- a/src/core/info/rustc.rs
+++ b/src/core/info/rustc.rs
@@ -3,18 +3,15 @@
 //! several crates, lower-level information is supplied from each crate during
 //! static initialization.
-use std::{
-	collections::BTreeMap,
-	sync::{Mutex, OnceLock},
-};
+use std::{collections::BTreeMap, sync::OnceLock};

-use crate::utils::exchange;
+use crate::{SyncMutex, utils::exchange};

 /// Raw capture of rustc flags used to build each crate in the project. Informed
 /// by rustc_flags_capture macro (one in each crate's mod.rs). This is
 /// done during static initialization which is why it's mutex-protected and pub.
 /// Should not be written to by anything other than our macro.
-pub static FLAGS: Mutex> = Mutex::new(BTreeMap::new());
+pub static FLAGS: SyncMutex> = SyncMutex::new(BTreeMap::new());

 /// Processed list of enabled features across all project crates. This is
 /// generated from the data in FLAGS.
@@ -27,7 +24,6 @@ fn init_features() -> Vec<&'static str> {
 	let mut features = Vec::new();
 	FLAGS
 		.lock()
-		.expect("locked")
 		.iter()
 		.for_each(|(_, flags)| append_features(&mut features, flags));

diff --git a/src/core/log/capture/layer.rs b/src/core/log/capture/layer.rs
index 381a652f..b3235d91 100644
--- a/src/core/log/capture/layer.rs
+++ b/src/core/log/capture/layer.rs
@@ -55,7 +55,7 @@ where
 	let mut visitor = Visitor { values: Values::new() };
 	event.record(&mut visitor);

-	let mut closure = capture.closure.lock().expect("exclusive lock");
+	let mut closure = capture.closure.lock();
 	closure(Data {
 		layer,
 		event,

diff --git a/src/core/log/capture/mod.rs b/src/core/log/capture/mod.rs
index 20f70091..b7e5d2b5 100644
--- a/src/core/log/capture/mod.rs
+++ b/src/core/log/capture/mod.rs
@@ -4,7 +4,7 @@ pub mod layer;
 pub mod state;
 pub mod util;

-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

 pub use data::Data;
 use guard::Guard;
@@ -12,6 +12,8 @@ pub use layer::{Layer, Value};
 pub use state::State;
 pub use util::*;

+use crate::SyncMutex;
+
 pub type Filter = dyn Fn(Data<'_>) -> bool + Send + Sync + 'static;
 pub type Closure = dyn FnMut(Data<'_>) + Send + Sync + 'static;

@@ -19,7 +21,7 @@ pub type Closure = dyn FnMut(Data<'_>) + Send + Sync + 'static;
 pub struct Capture {
 	state: Arc,
 	filter: Option>,
-	closure: Mutex>,
+	closure: SyncMutex>,
 }

 impl Capture {
@@ -34,7 +36,7 @@
 		Arc::new(Self {
 			state: state.clone(),
 			filter: filter.map(|p| -> Box { Box::new(p) }),
-			closure: Mutex::new(Box::new(closure)),
+			closure: SyncMutex::new(Box::new(closure)),
 		})
 	}

diff --git a/src/core/log/capture/util.rs b/src/core/log/capture/util.rs
index 65524be5..21a416a9 100644
--- a/src/core/log/capture/util.rs
+++ b/src/core/log/capture/util.rs
@@ -1,31 +1,31 @@
-use std::sync::{Arc, Mutex};
+use std::sync::Arc;

 use super::{
 	super::{Level, fmt},
 	Closure, Data,
 };
-use crate::Result;
+use crate::{Result, SyncMutex};

-pub fn fmt_html(out: Arc>) -> Box
+pub fn fmt_html(out: Arc>) -> Box
 where
 	S: std::fmt::Write + Send + 'static,
 {
 	fmt(fmt::html, out)
 }

-pub fn fmt_markdown(out: Arc>) -> Box
+pub fn fmt_markdown(out: Arc>) -> Box
 where
 	S: std::fmt::Write + Send + 'static,
 {
 	fmt(fmt::markdown, out)
 }

-pub fn fmt(fun: F, out: Arc>) -> Box
+pub fn fmt(fun: F, out: Arc>) -> Box
 where
 	F: Fn(&mut S, &Level, &str, &str) -> Result<()> + Send + Sync + Copy + 'static,
 	S: std::fmt::Write + Send + 'static,
 {
-	Box::new(move |data| call(fun, &mut *out.lock().expect("locked"), &data))
+	Box::new(move |data| call(fun, &mut *out.lock(), &data))
 }

 fn call(fun: F, out: &mut S, data: &Data<'_>)

diff --git a/src/core/log/reload.rs b/src/core/log/reload.rs
index f72fde47..356ee9f2 100644
--- a/src/core/log/reload.rs
+++ b/src/core/log/reload.rs
@@ -1,11 +1,8 @@
-use std::{
-	collections::HashMap,
-	sync::{Arc, Mutex},
-}; +use std::{collections::HashMap, sync::Arc}; use tracing_subscriber::{EnvFilter, reload}; -use crate::{Result, error}; +use crate::{Result, SyncMutex, error}; /// We need to store a reload::Handle value, but can't name it's type explicitly /// because the S type parameter depends on the subscriber's previous layers. In @@ -35,7 +32,7 @@ impl ReloadHandle for reload::Handle { #[derive(Clone)] pub struct LogLevelReloadHandles { - handles: Arc>, + handles: Arc>, } type HandleMap = HashMap; @@ -43,16 +40,12 @@ type Handle = Box + Send + Sync>; impl LogLevelReloadHandles { pub fn add(&self, name: &str, handle: Handle) { - self.handles - .lock() - .expect("locked") - .insert(name.into(), handle); + self.handles.lock().insert(name.into(), handle); } pub fn reload(&self, new_value: &EnvFilter, names: Option<&[&str]>) -> Result<()> { self.handles .lock() - .expect("locked") .iter() .filter(|(name, _)| names.is_some_and(|names| names.contains(&name.as_str()))) .for_each(|(_, handle)| { @@ -66,7 +59,6 @@ impl LogLevelReloadHandles { pub fn current(&self, name: &str) -> Option { self.handles .lock() - .expect("locked") .get(name) .map(|handle| handle.current())? } diff --git a/src/core/mod.rs b/src/core/mod.rs index d99139be..363fece8 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -28,6 +28,7 @@ pub use info::{ pub use matrix::{ Event, EventTypeExt, Pdu, PduCount, PduEvent, PduId, RoomVersion, pdu, state_res, }; +pub use parking_lot::{Mutex as SyncMutex, RwLock as SyncRwLock}; pub use server::Server; pub use utils::{ctor, dtor, implement, result, result::Result}; diff --git a/src/core/utils/mutex_map.rs b/src/core/utils/mutex_map.rs index 01504ce6..ddb361a4 100644 --- a/src/core/utils/mutex_map.rs +++ b/src/core/utils/mutex_map.rs @@ -1,12 +1,8 @@ -use std::{ - fmt::Debug, - hash::Hash, - sync::{Arc, TryLockError::WouldBlock}, -}; +use std::{fmt::Debug, hash::Hash, sync::Arc}; use tokio::sync::OwnedMutexGuard as Omg; -use crate::{Result, err}; +use crate::{Result, SyncMutex, err}; /// Map of Mutexes pub struct MutexMap { @@ -19,7 +15,7 @@ pub struct Guard { } type Map = Arc>; -type MapMutex = std::sync::Mutex>; +type MapMutex = SyncMutex>; type HashMap = std::collections::HashMap>; type Value = Arc>; @@ -45,7 +41,6 @@ where let val = self .map .lock() - .expect("locked") .entry(k.try_into().expect("failed to construct key")) .or_default() .clone(); @@ -66,7 +61,6 @@ where let val = self .map .lock() - .expect("locked") .entry(k.try_into().expect("failed to construct key")) .or_default() .clone(); @@ -87,10 +81,7 @@ where let val = self .map .try_lock() - .map_err(|e| match e { - | WouldBlock => err!("would block"), - | _ => panic!("{e:?}"), - })? + .ok_or_else(|| err!("would block"))? 
.entry(k.try_into().expect("failed to construct key")) .or_default() .clone(); @@ -102,13 +93,13 @@ where } #[must_use] - pub fn contains(&self, k: &Key) -> bool { self.map.lock().expect("locked").contains_key(k) } + pub fn contains(&self, k: &Key) -> bool { self.map.lock().contains_key(k) } #[must_use] - pub fn is_empty(&self) -> bool { self.map.lock().expect("locked").is_empty() } + pub fn is_empty(&self) -> bool { self.map.lock().is_empty() } #[must_use] - pub fn len(&self) -> usize { self.map.lock().expect("locked").len() } + pub fn len(&self) -> usize { self.map.lock().len() } } impl Default for MutexMap @@ -123,7 +114,7 @@ impl Drop for Guard { #[tracing::instrument(name = "unlock", level = "trace", skip_all)] fn drop(&mut self) { if Arc::strong_count(Omg::mutex(&self.val)) <= 2 { - self.map.lock().expect("locked").retain(|_, val| { + self.map.lock().retain(|_, val| { !Arc::ptr_eq(val, Omg::mutex(&self.val)) || Arc::strong_count(val) > 2 }); } diff --git a/src/macros/rustc.rs b/src/macros/rustc.rs index 1220c8d4..cf935fe5 100644 --- a/src/macros/rustc.rs +++ b/src/macros/rustc.rs @@ -15,13 +15,13 @@ pub(super) fn flags_capture(args: TokenStream) -> TokenStream { #[conduwuit_core::ctor] fn _set_rustc_flags() { - conduwuit_core::info::rustc::FLAGS.lock().expect("locked").insert(#crate_name, &RUSTC_FLAGS); + conduwuit_core::info::rustc::FLAGS.lock().insert(#crate_name, &RUSTC_FLAGS); } // static strings have to be yanked on module unload #[conduwuit_core::dtor] fn _unset_rustc_flags() { - conduwuit_core::info::rustc::FLAGS.lock().expect("locked").remove(#crate_name); + conduwuit_core::info::rustc::FLAGS.lock().remove(#crate_name); } }; From a1d616e3e3dfc6ad7bdd165d9435bfc733955b73 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 21:03:17 +0100 Subject: [PATCH 28/49] refactor: Replace std RwLock with parking_lot --- src/core/log/capture/layer.rs | 1 - src/core/log/capture/state.rs | 14 +++++----- src/database/watchers.rs | 10 +++---- src/service/globals/data.rs | 22 +++++++-------- src/service/manager.rs | 1 - src/service/service.rs | 50 ++++++++++++++++------------------- src/service/services.rs | 18 +++++-------- src/service/uiaa/mod.rs | 10 +++---- 8 files changed, 54 insertions(+), 72 deletions(-) diff --git a/src/core/log/capture/layer.rs b/src/core/log/capture/layer.rs index b3235d91..e3fe66df 100644 --- a/src/core/log/capture/layer.rs +++ b/src/core/log/capture/layer.rs @@ -40,7 +40,6 @@ where self.state .active .read() - .expect("shared lock") .iter() .filter(|capture| filter(self, capture, event, &ctx)) .for_each(|capture| handle(self, capture, event, &ctx)); diff --git a/src/core/log/capture/state.rs b/src/core/log/capture/state.rs index dad6c8d8..92a1608f 100644 --- a/src/core/log/capture/state.rs +++ b/src/core/log/capture/state.rs @@ -1,10 +1,11 @@ -use std::sync::{Arc, RwLock}; +use std::sync::Arc; use super::Capture; +use crate::SyncRwLock; /// Capture layer state. 
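// Aside on the mutex_map.rs try_lock change above (a sketch, not patch code):
// std's try_lock() returns Result with a TryLockError::WouldBlock variant that
// had to be matched out by hand; parking_lot's try_lock() returns an Option,
// so contention collapses into a single ok_or_else().
use parking_lot::Mutex;

fn main() {
    let map = Mutex::new(0_u8);
    let _held = map.lock();
    // A second, non-blocking acquisition on the held mutex yields None.
    let attempt = map.try_lock().map(|_guard| ()).ok_or("would block");
    assert_eq!(attempt, Err("would block"));
}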
pub struct State { - pub(super) active: RwLock>>, + pub(super) active: SyncRwLock>>, } impl Default for State { @@ -13,17 +14,14 @@ impl Default for State { impl State { #[must_use] - pub fn new() -> Self { Self { active: RwLock::new(Vec::new()) } } + pub fn new() -> Self { Self { active: SyncRwLock::new(Vec::new()) } } pub(super) fn add(&self, capture: &Arc) { - self.active - .write() - .expect("locked for writing") - .push(capture.clone()); + self.active.write().push(capture.clone()); } pub(super) fn del(&self, capture: &Arc) { - let mut vec = self.active.write().expect("locked for writing"); + let mut vec = self.active.write(); if let Some(pos) = vec.iter().position(|v| Arc::ptr_eq(v, capture)) { vec.swap_remove(pos); } diff --git a/src/database/watchers.rs b/src/database/watchers.rs index efb939d7..0e911c82 100644 --- a/src/database/watchers.rs +++ b/src/database/watchers.rs @@ -2,12 +2,12 @@ use std::{ collections::{HashMap, hash_map}, future::Future, pin::Pin, - sync::RwLock, }; +use conduwuit::SyncRwLock; use tokio::sync::watch; -type Watcher = RwLock, (watch::Sender<()>, watch::Receiver<()>)>>; +type Watcher = SyncRwLock, (watch::Sender<()>, watch::Receiver<()>)>>; #[derive(Default)] pub(crate) struct Watchers { @@ -19,7 +19,7 @@ impl Watchers { &'a self, prefix: &[u8], ) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { + let mut rx = match self.watchers.write().entry(prefix.to_vec()) { | hash_map::Entry::Occupied(o) => o.get().1.clone(), | hash_map::Entry::Vacant(v) => { let (tx, rx) = watch::channel(()); @@ -35,7 +35,7 @@ impl Watchers { } pub(crate) fn wake(&self, key: &[u8]) { - let watchers = self.watchers.read().unwrap(); + let watchers = self.watchers.read(); let mut triggered = Vec::new(); for length in 0..=key.len() { if watchers.contains_key(&key[..length]) { @@ -46,7 +46,7 @@ impl Watchers { drop(watchers); if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); + let mut watchers = self.watchers.write(); for prefix in triggered { if let Some(tx) = watchers.remove(prefix) { tx.0.send(()).expect("channel should still be open"); diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 21c09252..07f1de5c 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,11 +1,11 @@ -use std::sync::{Arc, RwLock}; +use std::sync::Arc; -use conduwuit::{Result, utils}; +use conduwuit::{Result, SyncRwLock, utils}; use database::{Database, Deserialized, Map}; pub struct Data { global: Arc, - counter: RwLock, + counter: SyncRwLock, pub(super) db: Arc, } @@ -16,25 +16,21 @@ impl Data { let db = &args.db; Self { global: db["global"].clone(), - counter: RwLock::new( - Self::stored_count(&db["global"]).expect("initialized global counter"), - ), + counter: SyncRwLock::new(Self::stored_count(&db["global"]).unwrap_or_default()), db: args.db.clone(), } } pub fn next_count(&self) -> Result { let _cork = self.db.cork(); - let mut lock = self.counter.write().expect("locked"); + let mut lock = self.counter.write(); let counter: &mut u64 = &mut lock; debug_assert!( - *counter == Self::stored_count(&self.global).expect("database failure"), + *counter == Self::stored_count(&self.global).unwrap_or_default(), "counter mismatch" ); - *counter = counter - .checked_add(1) - .expect("counter must not overflow u64"); + *counter = counter.checked_add(1).unwrap_or(*counter); self.global.insert(COUNTER, counter.to_be_bytes()); @@ -43,10 +39,10 @@ impl Data { #[inline] pub fn current_count(&self) -> 
u64 { - let lock = self.counter.read().expect("locked"); + let lock = self.counter.read(); let counter: &u64 = &lock; debug_assert!( - *counter == Self::stored_count(&self.global).expect("database failure"), + *counter == Self::stored_count(&self.global).unwrap_or_default(), "counter mismatch" ); diff --git a/src/service/manager.rs b/src/service/manager.rs index 3cdf5945..7a2e50d5 100644 --- a/src/service/manager.rs +++ b/src/service/manager.rs @@ -58,7 +58,6 @@ impl Manager { let services: Vec> = self .service .read() - .expect("locked for reading") .values() .map(|val| val.0.upgrade()) .map(|arc| arc.expect("services available for manager startup")) diff --git a/src/service/service.rs b/src/service/service.rs index 574efd8f..3bc61aeb 100644 --- a/src/service/service.rs +++ b/src/service/service.rs @@ -3,11 +3,13 @@ use std::{ collections::BTreeMap, fmt::Write, ops::Deref, - sync::{Arc, OnceLock, RwLock, Weak}, + sync::{Arc, OnceLock, Weak}, }; use async_trait::async_trait; -use conduwuit::{Err, Result, Server, err, error::inspect_log, utils::string::SplitInfallible}; +use conduwuit::{ + Err, Result, Server, SyncRwLock, err, error::inspect_log, utils::string::SplitInfallible, +}; use database::Database; /// Abstract interface for a Service @@ -62,7 +64,7 @@ pub(crate) struct Dep { name: &'static str, } -pub(crate) type Map = RwLock; +pub(crate) type Map = SyncRwLock; pub(crate) type MapType = BTreeMap; pub(crate) type MapVal = (Weak, Weak); pub(crate) type MapKey = String; @@ -143,15 +145,12 @@ pub(crate) fn get(map: &Map, name: &str) -> Option> where T: Any + Send + Sync + Sized, { - map.read() - .expect("locked for reading") - .get(name) - .map(|(_, s)| { - s.upgrade().map(|s| { - s.downcast::() - .expect("Service must be correctly downcast.") - }) - })? + map.read().get(name).map(|(_, s)| { + s.upgrade().map(|s| { + s.downcast::() + .expect("Service must be correctly downcast.") + }) + })? } /// Reference a Service by name. Returns Err if the Service does not exist or @@ -160,21 +159,18 @@ pub(crate) fn try_get(map: &Map, name: &str) -> Result> where T: Any + Send + Sync + Sized, { - map.read() - .expect("locked for reading") - .get(name) - .map_or_else( - || Err!("Service {name:?} does not exist or has not been built yet."), - |(_, s)| { - s.upgrade().map_or_else( - || Err!("Service {name:?} no longer exists."), - |s| { - s.downcast::() - .map_err(|_| err!("Service {name:?} must be correctly downcast.")) - }, - ) - }, - ) + map.read().get(name).map_or_else( + || Err!("Service {name:?} does not exist or has not been built yet."), + |(_, s)| { + s.upgrade().map_or_else( + || Err!("Service {name:?} no longer exists."), + |s| { + s.downcast::() + .map_err(|_| err!("Service {name:?} must be correctly downcast.")) + }, + ) + }, + ) } /// Utility for service implementations; see Service::name() in the trait. 
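Note for reviewers: the mechanical rewrite above is easy to misread in the diff noise. std::sync::RwLock wraps its guard in a Result because a panic while a guard is held poisons the lock, hence the .expect("locked for reading") at every call site; parking_lot locks never poison and hand back the guard directly. A minimal sketch of the before/after, assuming only that SyncRwLock is the parking_lot re-export added to src/core/mod.rs earlier in this series:

use std::sync::RwLock as StdRwLock;

use parking_lot::RwLock as SyncRwLock;

fn main() {
    // Before: the Result wrapper forces an expect()/unwrap() everywhere,
    // even though poisoning is almost never handled deliberately.
    let std_lock = StdRwLock::new(vec!["service"]);
    let len = std_lock.read().expect("locked for reading").len();

    // After: no poisoning, so read()/write() return the guard directly.
    // The one semantic change is that a panic while a guard is held no
    // longer poisons the lock for subsequent users.
    let sync_lock = SyncRwLock::new(vec!["service"]);
    assert_eq!(sync_lock.read().len(), len);
}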
diff --git a/src/service/services.rs b/src/service/services.rs index daece245..642f61c7 100644 --- a/src/service/services.rs +++ b/src/service/services.rs @@ -1,10 +1,8 @@ -use std::{ - any::Any, - collections::BTreeMap, - sync::{Arc, RwLock}, -}; +use std::{any::Any, collections::BTreeMap, sync::Arc}; -use conduwuit::{Result, Server, debug, debug_info, info, trace, utils::stream::IterStream}; +use conduwuit::{ + Result, Server, SyncRwLock, debug, debug_info, info, trace, utils::stream::IterStream, +}; use database::Database; use futures::{Stream, StreamExt, TryStreamExt}; use tokio::sync::Mutex; @@ -52,7 +50,7 @@ impl Services { #[allow(clippy::cognitive_complexity)] pub async fn build(server: Arc) -> Result> { let db = Database::open(&server).await?; - let service: Arc = Arc::new(RwLock::new(BTreeMap::new())); + let service: Arc = Arc::new(SyncRwLock::new(BTreeMap::new())); macro_rules! build { ($tyname:ty) => {{ let built = <$tyname>::build(Args { @@ -193,7 +191,7 @@ impl Services { fn interrupt(&self) { debug!("Interrupting services..."); - for (name, (service, ..)) in self.service.read().expect("locked for reading").iter() { + for (name, (service, ..)) in self.service.read().iter() { if let Some(service) = service.upgrade() { trace!("Interrupting {name}"); service.interrupt(); @@ -205,7 +203,6 @@ impl Services { fn services(&self) -> impl Stream> + Send { self.service .read() - .expect("locked for reading") .values() .filter_map(|val| val.0.upgrade()) .collect::>() @@ -233,10 +230,9 @@ impl Services { #[allow(clippy::needless_pass_by_value)] fn add_service(map: &Arc, s: Arc, a: Arc) { let name = s.name(); - let len = map.read().expect("locked for reading").len(); + let len = map.read().len(); trace!("built service #{len}: {name:?}"); map.write() - .expect("locked for writing") .insert(name.to_owned(), (Arc::downgrade(&s), Arc::downgrade(&a))); } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 7735c87f..acd3dd86 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,10 +1,10 @@ use std::{ collections::{BTreeMap, HashSet}, - sync::{Arc, RwLock}, + sync::Arc, }; use conduwuit::{ - Err, Error, Result, err, error, implement, utils, + Err, Error, Result, SyncRwLock, err, error, implement, utils, utils::{hash, string::EMPTY}, }; use database::{Deserialized, Json, Map}; @@ -19,7 +19,7 @@ use ruma::{ use crate::{Dep, config, globals, users}; pub struct Service { - userdevicesessionid_uiaarequest: RwLock, + userdevicesessionid_uiaarequest: SyncRwLock, db: Data, services: Services, } @@ -42,7 +42,7 @@ pub const SESSION_ID_LENGTH: usize = 32; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - userdevicesessionid_uiaarequest: RwLock::new(RequestMap::new()), + userdevicesessionid_uiaarequest: SyncRwLock::new(RequestMap::new()), db: Data { userdevicesessionid_uiaainfo: args.db["userdevicesessionid_uiaainfo"].clone(), }, @@ -268,7 +268,6 @@ fn set_uiaa_request( let key = (user_id.to_owned(), device_id.to_owned(), session.to_owned()); self.userdevicesessionid_uiaarequest .write() - .expect("locked for writing") .insert(key, request.to_owned()); } @@ -287,7 +286,6 @@ pub fn get_uiaa_request( self.userdevicesessionid_uiaarequest .read() - .expect("locked for reading") .get(&key) .cloned() } From 374fb2745c47e8b26e0b5daa435ace709f446498 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 22:05:43 +0100 Subject: [PATCH 29/49] refactor: Replace remaining std Mutexes --- src/database/engine/backup.rs | 2 +- 
src/database/engine/cf_opts.rs | 2 +- src/database/engine/context.rs | 15 +++--- src/database/engine/memory_usage.rs | 6 +-- src/database/engine/open.rs | 6 +-- src/database/pool.rs | 10 ++-- src/service/admin/console.rs | 56 ++++++++------------- src/service/rooms/auth_chain/data.rs | 24 +++------ src/service/rooms/auth_chain/mod.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 14 +++--- src/service/sync/mod.rs | 61 +++++++++++------------ 11 files changed, 83 insertions(+), 117 deletions(-) diff --git a/src/database/engine/backup.rs b/src/database/engine/backup.rs index ac72e6d4..4cdb6172 100644 --- a/src/database/engine/backup.rs +++ b/src/database/engine/backup.rs @@ -71,7 +71,7 @@ pub fn backup_count(&self) -> Result { fn backup_engine(&self) -> Result { let path = self.backup_path()?; let options = BackupEngineOptions::new(path).map_err(map_err)?; - BackupEngine::open(&options, &*self.ctx.env.lock()?).map_err(map_err) + BackupEngine::open(&options, &self.ctx.env.lock()).map_err(map_err) } #[implement(Engine)] diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index cbbd1012..58358f02 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -232,7 +232,7 @@ fn get_cache(ctx: &Context, desc: &Descriptor) -> Option { cache_opts.set_num_shard_bits(shard_bits); cache_opts.set_capacity(size); - let mut caches = ctx.col_cache.lock().expect("locked"); + let mut caches = ctx.col_cache.lock(); match desc.cache_disp { | CacheDisp::Unique if desc.cache_size == 0 => None, | CacheDisp::Unique => { diff --git a/src/database/engine/context.rs b/src/database/engine/context.rs index 380e37af..3b9238bd 100644 --- a/src/database/engine/context.rs +++ b/src/database/engine/context.rs @@ -1,9 +1,6 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, Mutex}, -}; +use std::{collections::BTreeMap, sync::Arc}; -use conduwuit::{Result, Server, debug, utils::math::usize_from_f64}; +use conduwuit::{Result, Server, SyncMutex, debug, utils::math::usize_from_f64}; use rocksdb::{Cache, Env, LruCacheOptions}; use crate::{or_else, pool::Pool}; @@ -14,9 +11,9 @@ use crate::{or_else, pool::Pool}; /// These assets are housed in the shared Context. 
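// Aside on the backup.rs hunk above (a sketch, not part of the patch):
// parking_lot's MutexGuard implements Deref, so a borrow of the guard coerces
// straight to &T -- `&self.ctx.env.lock()` replaces the old
// `&*self.ctx.env.lock()?` with no Result in sight. Env below is a stand-in
// type for illustration; the real field holds a rocksdb::Env.
use parking_lot::Mutex;

struct Env {
    background_threads: i32,
}

fn open_with(env: &Env) -> i32 {
    env.background_threads
}

fn main() {
    let env = Mutex::new(Env { background_threads: 2 });
    // The guard is a temporary: the lock is released at the end of the full
    // expression, exactly like the one-liner in backup_engine().
    assert_eq!(open_with(&env.lock()), 2);
}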
pub(crate) struct Context { pub(crate) pool: Arc, - pub(crate) col_cache: Mutex>, - pub(crate) row_cache: Mutex, - pub(crate) env: Mutex, + pub(crate) col_cache: SyncMutex>, + pub(crate) row_cache: SyncMutex, + pub(crate) env: SyncMutex, pub(crate) server: Arc, } @@ -68,7 +65,7 @@ impl Drop for Context { debug!("Closing frontend pool"); self.pool.close(); - let mut env = self.env.lock().expect("locked"); + let mut env = self.env.lock(); debug!("Shutting down background threads"); env.set_high_priority_background_threads(0); diff --git a/src/database/engine/memory_usage.rs b/src/database/engine/memory_usage.rs index 9bb5c535..21af35c8 100644 --- a/src/database/engine/memory_usage.rs +++ b/src/database/engine/memory_usage.rs @@ -9,7 +9,7 @@ use crate::or_else; #[implement(Engine)] pub fn memory_usage(&self) -> Result { let mut res = String::new(); - let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()?])) + let stats = get_memory_usage_stats(Some(&[&self.db]), Some(&[&*self.ctx.row_cache.lock()])) .or_else(or_else)?; let mibs = |input| f64::from(u32::try_from(input / 1024).unwrap_or(0)) / 1024.0; writeln!( @@ -19,10 +19,10 @@ pub fn memory_usage(&self) -> Result { mibs(stats.mem_table_total), mibs(stats.mem_table_unflushed), mibs(stats.mem_table_readers_total), - mibs(u64::try_from(self.ctx.row_cache.lock()?.get_usage())?), + mibs(u64::try_from(self.ctx.row_cache.lock().get_usage())?), )?; - for (name, cache) in &*self.ctx.col_cache.lock()? { + for (name, cache) in &*self.ctx.col_cache.lock() { writeln!(res, "{name} cache: {:.2} MiB", mibs(u64::try_from(cache.get_usage())?))?; } diff --git a/src/database/engine/open.rs b/src/database/engine/open.rs index 84e59a6a..7b9d93c2 100644 --- a/src/database/engine/open.rs +++ b/src/database/engine/open.rs @@ -23,11 +23,7 @@ pub(crate) async fn open(ctx: Arc, desc: &[Descriptor]) -> Result, queues: Vec>, - workers: Mutex>>, + workers: SyncMutex>>, topology: Vec, busy: AtomicUsize, queued_max: AtomicUsize, @@ -115,7 +115,7 @@ impl Drop for Pool { #[implement(Pool)] #[tracing::instrument(skip_all)] pub(crate) fn close(&self) { - let workers = take(&mut *self.workers.lock().expect("locked")); + let workers = take(&mut *self.workers.lock()); let senders = self.queues.iter().map(Sender::sender_count).sum::(); @@ -154,7 +154,7 @@ pub(crate) fn close(&self) { #[implement(Pool)] fn spawn_until(self: &Arc, recv: &[Receiver], count: usize) -> Result { - let mut workers = self.workers.lock().expect("locked"); + let mut workers = self.workers.lock(); while workers.len() < count { self.clone().spawn_one(&mut workers, recv)?; } diff --git a/src/service/admin/console.rs b/src/service/admin/console.rs index 02f41303..931bb719 100644 --- a/src/service/admin/console.rs +++ b/src/service/admin/console.rs @@ -1,11 +1,8 @@ #![cfg(feature = "console")] -use std::{ - collections::VecDeque, - sync::{Arc, Mutex}, -}; +use std::{collections::VecDeque, sync::Arc}; -use conduwuit::{Server, debug, defer, error, log, log::is_systemd_mode}; +use conduwuit::{Server, SyncMutex, debug, defer, error, log, log::is_systemd_mode}; use futures::future::{AbortHandle, Abortable}; use ruma::events::room::message::RoomMessageEventContent; use rustyline_async::{Readline, ReadlineError, ReadlineEvent}; @@ -17,10 +14,10 @@ use crate::{Dep, admin}; pub struct Console { server: Arc, admin: Dep, - worker_join: Mutex>>, - input_abort: Mutex>, - command_abort: Mutex>, - history: Mutex>, + worker_join: SyncMutex>>, + input_abort: SyncMutex>, + command_abort: 
SyncMutex>, + history: SyncMutex>, output: MadSkin, } @@ -50,7 +47,7 @@ impl Console { } pub async fn start(self: &Arc) { - let mut worker_join = self.worker_join.lock().expect("locked"); + let mut worker_join = self.worker_join.lock(); if worker_join.is_none() { let self_ = Arc::clone(self); _ = worker_join.insert(self.server.runtime().spawn(self_.worker())); @@ -60,7 +57,7 @@ impl Console { pub async fn close(self: &Arc) { self.interrupt(); - let Some(worker_join) = self.worker_join.lock().expect("locked").take() else { + let Some(worker_join) = self.worker_join.lock().take() else { return; }; @@ -70,22 +67,18 @@ impl Console { pub fn interrupt(self: &Arc) { self.interrupt_command(); self.interrupt_readline(); - self.worker_join - .lock() - .expect("locked") - .as_ref() - .map(JoinHandle::abort); + self.worker_join.lock().as_ref().map(JoinHandle::abort); } pub fn interrupt_readline(self: &Arc) { - if let Some(input_abort) = self.input_abort.lock().expect("locked").take() { + if let Some(input_abort) = self.input_abort.lock().take() { debug!("Interrupting console readline..."); input_abort.abort(); } } pub fn interrupt_command(self: &Arc) { - if let Some(command_abort) = self.command_abort.lock().expect("locked").take() { + if let Some(command_abort) = self.command_abort.lock().take() { debug!("Interrupting console command..."); command_abort.abort(); } @@ -120,7 +113,7 @@ impl Console { } debug!("session ending"); - self.worker_join.lock().expect("locked").take(); + self.worker_join.lock().take(); } async fn readline(self: &Arc) -> Result { @@ -135,9 +128,9 @@ impl Console { let (abort, abort_reg) = AbortHandle::new_pair(); let future = Abortable::new(future, abort_reg); - _ = self.input_abort.lock().expect("locked").insert(abort); + _ = self.input_abort.lock().insert(abort); defer! {{ - _ = self.input_abort.lock().expect("locked").take(); + _ = self.input_abort.lock().take(); }} let Ok(result) = future.await else { @@ -158,9 +151,9 @@ impl Console { let (abort, abort_reg) = AbortHandle::new_pair(); let future = Abortable::new(future, abort_reg); - _ = self.command_abort.lock().expect("locked").insert(abort); + _ = self.command_abort.lock().insert(abort); defer! 
{{ - _ = self.command_abort.lock().expect("locked").take(); + _ = self.command_abort.lock().take(); }} _ = future.await; @@ -184,20 +177,15 @@ impl Console { } fn set_history(&self, readline: &mut Readline) { - self.history - .lock() - .expect("locked") - .iter() - .rev() - .for_each(|entry| { - readline - .add_history_entry(entry.clone()) - .expect("added history entry"); - }); + self.history.lock().iter().rev().for_each(|entry| { + readline + .add_history_entry(entry.clone()) + .expect("added history entry"); + }); } fn add_history(&self, line: String) { - let mut history = self.history.lock().expect("locked"); + let mut history = self.history.lock(); history.push_front(line); history.truncate(HISTORY_LIMIT); } diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 8c3588cc..e9e40979 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,9 +1,6 @@ -use std::{ - mem::size_of, - sync::{Arc, Mutex}, -}; +use std::{mem::size_of, sync::Arc}; -use conduwuit::{Err, Result, err, utils, utils::math::usize_from_f64}; +use conduwuit::{Err, Result, SyncMutex, err, utils, utils::math::usize_from_f64}; use database::Map; use lru_cache::LruCache; @@ -11,7 +8,7 @@ use crate::rooms::short::ShortEventId; pub(super) struct Data { shorteventid_authchain: Arc, - pub(super) auth_chain_cache: Mutex, Arc<[ShortEventId]>>>, + pub(super) auth_chain_cache: SyncMutex, Arc<[ShortEventId]>>>, } impl Data { @@ -23,7 +20,7 @@ impl Data { .expect("valid cache size"); Self { shorteventid_authchain: db["shorteventid_authchain"].clone(), - auth_chain_cache: Mutex::new(LruCache::new(cache_size)), + auth_chain_cache: SyncMutex::new(LruCache::new(cache_size)), } } @@ -34,12 +31,7 @@ impl Data { debug_assert!(!key.is_empty(), "auth_chain key must not be empty"); // Check RAM cache - if let Some(result) = self - .auth_chain_cache - .lock() - .expect("cache locked") - .get_mut(key) - { + if let Some(result) = self.auth_chain_cache.lock().get_mut(key) { return Ok(Arc::clone(result)); } @@ -63,7 +55,6 @@ impl Data { // Cache in RAM self.auth_chain_cache .lock() - .expect("cache locked") .insert(vec![key[0]], Arc::clone(&chain)); Ok(chain) @@ -84,9 +75,6 @@ impl Data { } // Cache in RAM - self.auth_chain_cache - .lock() - .expect("cache locked") - .insert(key, auth_chain); + self.auth_chain_cache.lock().insert(key, auth_chain); } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 0903ea75..79d4d070 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -248,10 +248,10 @@ pub fn cache_auth_chain_vec(&self, key: Vec, auth_chain: &[ShortEventId]) { #[implement(Service)] pub fn get_cache_usage(&self) -> (usize, usize) { - let cache = self.db.auth_chain_cache.lock().expect("locked"); + let cache = self.db.auth_chain_cache.lock(); (cache.len(), cache.capacity()) } #[implement(Service)] -pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().expect("locked").clear(); } +pub fn clear_cache(&self) { self.db.auth_chain_cache.lock().clear(); } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index a33fb342..f7f7d043 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -2,12 +2,12 @@ use std::{ collections::{BTreeSet, HashMap}, fmt::{Debug, Write}, mem::size_of, - sync::{Arc, Mutex}, + sync::Arc, }; use async_trait::async_trait; use conduwuit::{ - Result, + Result, 
SyncMutex, arrayvec::ArrayVec, at, checked, err, expected, implement, utils, utils::{bytes, math::usize_from_f64, stream::IterStream}, @@ -23,7 +23,7 @@ use crate::{ }; pub struct Service { - pub stateinfo_cache: Mutex, + pub stateinfo_cache: SyncMutex, db: Data, services: Services, } @@ -86,7 +86,7 @@ impl crate::Service for Service { async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { let (cache_len, ents) = { - let cache = self.stateinfo_cache.lock().expect("locked"); + let cache = self.stateinfo_cache.lock(); let ents = cache.iter().map(at!(1)).flat_map(|vec| vec.iter()).fold( HashMap::new(), |mut ents, ssi| { @@ -110,7 +110,7 @@ impl crate::Service for Service { Ok(()) } - async fn clear_cache(&self) { self.stateinfo_cache.lock().expect("locked").clear(); } + async fn clear_cache(&self) { self.stateinfo_cache.lock().clear(); } fn name(&self) -> &str { crate::service::make_name(std::module_path!()) } } @@ -123,7 +123,7 @@ pub async fn load_shortstatehash_info( &self, shortstatehash: ShortStateHash, ) -> Result { - if let Some(r) = self.stateinfo_cache.lock()?.get_mut(&shortstatehash) { + if let Some(r) = self.stateinfo_cache.lock().get_mut(&shortstatehash) { return Ok(r.clone()); } @@ -152,7 +152,7 @@ async fn cache_shortstatehash_info( shortstatehash: ShortStateHash, stack: ShortStateInfoVec, ) -> Result { - self.stateinfo_cache.lock()?.insert(shortstatehash, stack); + self.stateinfo_cache.lock().insert(shortstatehash, stack); Ok(()) } diff --git a/src/service/sync/mod.rs b/src/service/sync/mod.rs index b095d2c1..6ac579f4 100644 --- a/src/service/sync/mod.rs +++ b/src/service/sync/mod.rs @@ -2,10 +2,10 @@ mod watch; use std::{ collections::{BTreeMap, BTreeSet}, - sync::{Arc, Mutex, Mutex as StdMutex}, + sync::Arc, }; -use conduwuit::{Result, Server}; +use conduwuit::{Result, Server, SyncMutex}; use database::Map; use ruma::{ OwnedDeviceId, OwnedRoomId, OwnedUserId, @@ -62,11 +62,11 @@ struct SnakeSyncCache { extensions: v5::request::Extensions, } -type DbConnections = Mutex>; +type DbConnections = SyncMutex>; type DbConnectionsKey = (OwnedUserId, OwnedDeviceId, String); -type DbConnectionsVal = Arc>; +type DbConnectionsVal = Arc>; type SnakeConnectionsKey = (OwnedUserId, OwnedDeviceId, Option); -type SnakeConnectionsVal = Arc>; +type SnakeConnectionsVal = Arc>; impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { @@ -90,8 +90,8 @@ impl crate::Service for Service { state_cache: args.depend::("rooms::state_cache"), typing: args.depend::("rooms::typing"), }, - connections: StdMutex::new(BTreeMap::new()), - snake_connections: StdMutex::new(BTreeMap::new()), + connections: SyncMutex::new(BTreeMap::new()), + snake_connections: SyncMutex::new(BTreeMap::new()), })) } @@ -100,22 +100,19 @@ impl crate::Service for Service { impl Service { pub fn snake_connection_cached(&self, key: &SnakeConnectionsKey) -> bool { - self.snake_connections - .lock() - .expect("locked") - .contains_key(key) + self.snake_connections.lock().contains_key(key) } pub fn forget_snake_sync_connection(&self, key: &SnakeConnectionsKey) { - self.snake_connections.lock().expect("locked").remove(key); + self.snake_connections.lock().remove(key); } pub fn remembered(&self, key: &DbConnectionsKey) -> bool { - self.connections.lock().expect("locked").contains_key(key) + self.connections.lock().contains_key(key) } pub fn forget_sync_request_connection(&self, key: &DbConnectionsKey) { - self.connections.lock().expect("locked").remove(key); + self.connections.lock().remove(key); } pub fn 
update_snake_sync_request_with_cache( @@ -123,13 +120,13 @@ impl Service { snake_key: &SnakeConnectionsKey, request: &mut v5::Request, ) -> BTreeMap> { - let mut cache = self.snake_connections.lock().expect("locked"); + let mut cache = self.snake_connections.lock(); let cached = Arc::clone( cache .entry(snake_key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + .or_insert_with(|| Arc::new(SyncMutex::new(SnakeSyncCache::default()))), ); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut cached.lock(); drop(cache); //v5::Request::try_from_http_request(req, path_args); @@ -232,16 +229,16 @@ impl Service { }; let key = into_db_key(key.0.clone(), key.1.clone(), conn_id); - let mut cache = self.connections.lock().expect("locked"); + let mut cache = self.connections.lock(); let cached = Arc::clone(cache.entry(key).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { + Arc::new(SyncMutex::new(SlidingSyncCache { lists: BTreeMap::new(), subscriptions: BTreeMap::new(), known_rooms: BTreeMap::new(), extensions: ExtensionsConfig::default(), })) })); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut cached.lock(); drop(cache); for (list_id, list) in &mut request.lists { @@ -328,16 +325,16 @@ impl Service { key: &DbConnectionsKey, subscriptions: BTreeMap, ) { - let mut cache = self.connections.lock().expect("locked"); + let mut cache = self.connections.lock(); let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { + Arc::new(SyncMutex::new(SlidingSyncCache { lists: BTreeMap::new(), subscriptions: BTreeMap::new(), known_rooms: BTreeMap::new(), extensions: ExtensionsConfig::default(), })) })); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut cached.lock(); drop(cache); cached.subscriptions = subscriptions; @@ -350,16 +347,16 @@ impl Service { new_cached_rooms: BTreeSet, globalsince: u64, ) { - let mut cache = self.connections.lock().expect("locked"); + let mut cache = self.connections.lock(); let cached = Arc::clone(cache.entry(key.clone()).or_insert_with(|| { - Arc::new(Mutex::new(SlidingSyncCache { + Arc::new(SyncMutex::new(SlidingSyncCache { lists: BTreeMap::new(), subscriptions: BTreeMap::new(), known_rooms: BTreeMap::new(), extensions: ExtensionsConfig::default(), })) })); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut cached.lock(); drop(cache); for (room_id, lastsince) in cached @@ -386,13 +383,13 @@ impl Service { globalsince: u64, ) { assert!(key.2.is_some(), "Some(conn_id) required for this call"); - let mut cache = self.snake_connections.lock().expect("locked"); + let mut cache = self.snake_connections.lock(); let cached = Arc::clone( cache .entry(key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + .or_insert_with(|| Arc::new(SyncMutex::new(SnakeSyncCache::default()))), ); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut cached.lock(); drop(cache); for (room_id, lastsince) in cached @@ -416,13 +413,13 @@ impl Service { key: &SnakeConnectionsKey, subscriptions: BTreeMap, ) { - let mut cache = self.snake_connections.lock().expect("locked"); + let mut cache = self.snake_connections.lock(); let cached = Arc::clone( cache .entry(key.clone()) - .or_insert_with(|| Arc::new(Mutex::new(SnakeSyncCache::default()))), + .or_insert_with(|| Arc::new(SyncMutex::new(SnakeSyncCache::default()))), ); - let cached = &mut cached.lock().expect("locked"); + let cached = &mut 
cached.lock(); drop(cache); cached.subscriptions = subscriptions; From 6d29098d1af955d98ec57da4593836e67e8df090 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 22:20:26 +0100 Subject: [PATCH 30/49] refactor: Replace remaining std RwLocks --- src/admin/federation/commands.rs | 3 +-- src/admin/mod.rs | 12 ++------- src/core/alloc/je.rs | 7 +++--- src/service/admin/mod.rs | 25 +++++++------------ src/service/globals/mod.rs | 22 +++++----------- .../fetch_and_handle_outliers.rs | 3 --- .../event_handler/handle_incoming_pdu.rs | 3 --- .../rooms/event_handler/handle_prev_pdu.rs | 3 --- src/service/rooms/event_handler/mod.rs | 17 +++---------- src/service/rooms/state_cache/mod.rs | 22 +++++----------- src/service/rooms/state_cache/update.rs | 5 +--- 11 files changed, 32 insertions(+), 90 deletions(-) diff --git a/src/admin/federation/commands.rs b/src/admin/federation/commands.rs index 545dcbca..f77dadab 100644 --- a/src/admin/federation/commands.rs +++ b/src/admin/federation/commands.rs @@ -26,8 +26,7 @@ pub(super) async fn incoming_federation(&self) -> Result { .rooms .event_handler .federation_handletime - .read() - .expect("locked"); + .read(); let mut msg = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { diff --git a/src/admin/mod.rs b/src/admin/mod.rs index 732b8ce0..1d46590b 100644 --- a/src/admin/mod.rs +++ b/src/admin/mod.rs @@ -37,11 +37,7 @@ pub use crate::admin::AdminCommand; /// Install the admin command processor pub async fn init(admin_service: &service::admin::Service) { - _ = admin_service - .complete - .write() - .expect("locked for writing") - .insert(processor::complete); + _ = admin_service.complete.write().insert(processor::complete); _ = admin_service .handle .write() @@ -52,9 +48,5 @@ pub async fn init(admin_service: &service::admin::Service) { /// Uninstall the admin command handler pub async fn fini(admin_service: &service::admin::Service) { _ = admin_service.handle.write().await.take(); - _ = admin_service - .complete - .write() - .expect("locked for writing") - .take(); + _ = admin_service.complete.write().take(); } diff --git a/src/core/alloc/je.rs b/src/core/alloc/je.rs index e138233e..77deebc5 100644 --- a/src/core/alloc/je.rs +++ b/src/core/alloc/je.rs @@ -4,7 +4,6 @@ use std::{ cell::OnceCell, ffi::{CStr, c_char, c_void}, fmt::Debug, - sync::RwLock, }; use arrayvec::ArrayVec; @@ -13,7 +12,7 @@ use tikv_jemalloc_sys as ffi; use tikv_jemallocator as jemalloc; use crate::{ - Result, err, is_equal_to, is_nonzero, + Result, SyncRwLock, err, is_equal_to, is_nonzero, utils::{math, math::Tried}, }; @@ -40,7 +39,7 @@ const MALLOC_CONF_PROF: &str = ""; #[global_allocator] static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc; -static CONTROL: RwLock<()> = RwLock::new(()); +static CONTROL: SyncRwLock<()> = SyncRwLock::new(()); type Name = ArrayVec; type Key = ArrayVec; @@ -332,7 +331,7 @@ fn set(key: &Key, val: T) -> Result where T: Copy + Debug, { - let _lock = CONTROL.write()?; + let _lock = CONTROL.write(); let res = xchg(key, val)?; inc_epoch()?; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index f496c414..c052198c 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -5,11 +5,11 @@ mod grant; use std::{ pin::Pin, - sync::{Arc, RwLock as StdRwLock, Weak}, + sync::{Arc, Weak}, }; use async_trait::async_trait; -use conduwuit::{Err, utils}; +use conduwuit::{Err, SyncRwLock, utils}; use conduwuit_core::{ Error, Event, Result, Server, debug, err, error, error::default_log, 
pdu::PduBuilder, }; @@ -36,7 +36,7 @@ pub struct Service { services: Services, channel: (Sender, Receiver), pub handle: RwLock>, - pub complete: StdRwLock>, + pub complete: SyncRwLock>, #[cfg(feature = "console")] pub console: Arc, } @@ -50,7 +50,7 @@ struct Services { state_cache: Dep, state_accessor: Dep, account_data: Dep, - services: StdRwLock>>, + services: SyncRwLock>>, media: Dep, } @@ -105,7 +105,7 @@ impl crate::Service for Service { }, channel: loole::bounded(COMMAND_QUEUE_LIMIT), handle: RwLock::new(None), - complete: StdRwLock::new(None), + complete: SyncRwLock::new(None), #[cfg(feature = "console")] console: console::Console::new(&args), })) @@ -312,10 +312,7 @@ impl Service { /// Invokes the tab-completer to complete the command. When unavailable, /// None is returned. pub fn complete_command(&self, command: &str) -> Option { - self.complete - .read() - .expect("locked for reading") - .map(|complete| complete(command)) + self.complete.read().map(|complete| complete(command)) } async fn handle_signal(&self, sig: &'static str) { @@ -338,17 +335,13 @@ impl Service { } async fn process_command(&self, command: CommandInput) -> ProcessorResult { - let handle = &self - .handle - .read() - .await - .expect("Admin module is not loaded"); + let handle_guard = self.handle.read().await; + let handle = handle_guard.as_ref().expect("Admin module is not loaded"); let services = self .services .services .read() - .expect("locked") .as_ref() .and_then(Weak::upgrade) .expect("Services self-reference not initialized."); @@ -523,7 +516,7 @@ impl Service { /// Sets the self-reference to crate::Services which will provide context to /// the admin commands. pub(super) fn set_services(&self, services: Option<&Arc>) { - let receiver = &mut *self.services.services.write().expect("locked for writing"); + let receiver = &mut *self.services.services.write(); let weak = services.map(Arc::downgrade); *receiver = weak; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index a23a4c21..12f2ec78 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,14 +1,9 @@ mod data; -use std::{ - collections::HashMap, - fmt::Write, - sync::{Arc, RwLock}, - time::Instant, -}; +use std::{collections::HashMap, fmt::Write, sync::Arc, time::Instant}; use async_trait::async_trait; -use conduwuit::{Result, Server, error, utils::bytes::pretty}; +use conduwuit::{Result, Server, SyncRwLock, error, utils::bytes::pretty}; use data::Data; use regex::RegexSet; use ruma::{OwnedEventId, OwnedRoomAliasId, OwnedServerName, OwnedUserId, ServerName, UserId}; @@ -19,7 +14,7 @@ pub struct Service { pub db: Data, server: Arc, - pub bad_event_ratelimiter: Arc>>, + pub bad_event_ratelimiter: Arc>>, pub server_user: OwnedUserId, pub admin_alias: OwnedRoomAliasId, pub turn_secret: String, @@ -62,7 +57,7 @@ impl crate::Service for Service { Ok(Arc::new(Self { db, server: args.server.clone(), - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_event_ratelimiter: Arc::new(SyncRwLock::new(HashMap::new())), admin_alias: OwnedRoomAliasId::try_from(format!("#admins:{}", &args.server.name)) .expect("#admins:server_name is valid alias name"), server_user: UserId::parse_with_server_name( @@ -76,7 +71,7 @@ impl crate::Service for Service { } async fn memory_usage(&self, out: &mut (dyn Write + Send)) -> Result { - let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read()?.iter().fold( + let (ber_count, ber_bytes) = self.bad_event_ratelimiter.read().iter().fold( (0_usize, 0_usize), |(mut count, 
mut bytes), (event_id, _)| { bytes = bytes.saturating_add(event_id.capacity()); @@ -91,12 +86,7 @@ impl crate::Service for Service { Ok(()) } - async fn clear_cache(&self) { - self.bad_event_ratelimiter - .write() - .expect("locked for writing") - .clear(); - } + async fn clear_cache(&self) { self.bad_event_ratelimiter.write().clear(); } fn name(&self) -> &str { service::make_name(std::module_path!()) } } diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs index 44027e04..59b768f2 100644 --- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs +++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs @@ -41,7 +41,6 @@ where .globals .bad_event_ratelimiter .write() - .expect("locked") .entry(id) { | hash_map::Entry::Vacant(e) => { @@ -76,7 +75,6 @@ where .globals .bad_event_ratelimiter .read() - .expect("locked") .get(&*next_id) { // Exponential backoff @@ -187,7 +185,6 @@ where .globals .bad_event_ratelimiter .read() - .expect("locked") .get(&*next_id) { // Exponential backoff diff --git a/src/service/rooms/event_handler/handle_incoming_pdu.rs b/src/service/rooms/event_handler/handle_incoming_pdu.rs index 86a05e0a..5299e8d4 100644 --- a/src/service/rooms/event_handler/handle_incoming_pdu.rs +++ b/src/service/rooms/event_handler/handle_incoming_pdu.rs @@ -160,7 +160,6 @@ pub async fn handle_incoming_pdu<'a>( .globals .bad_event_ratelimiter .write() - .expect("locked") .entry(prev_id.into()) { | hash_map::Entry::Vacant(e) => { @@ -181,13 +180,11 @@ pub async fn handle_incoming_pdu<'a>( let start_time = Instant::now(); self.federation_handletime .write() - .expect("locked") .insert(room_id.into(), (event_id.to_owned(), start_time)); defer! {{ self.federation_handletime .write() - .expect("locked") .remove(room_id); }}; diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs index cd46310a..cb4978d9 100644 --- a/src/service/rooms/event_handler/handle_prev_pdu.rs +++ b/src/service/rooms/event_handler/handle_prev_pdu.rs @@ -42,7 +42,6 @@ where .globals .bad_event_ratelimiter .read() - .expect("locked") .get(prev_id) { // Exponential backoff @@ -70,13 +69,11 @@ where let start_time = Instant::now(); self.federation_handletime .write() - .expect("locked") .insert(room_id.into(), ((*prev_id).to_owned(), start_time)); defer! 
{{ self.federation_handletime .write() - .expect("locked") .remove(room_id); }}; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index aed38e1e..4e59c207 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -10,15 +10,10 @@ mod resolve_state; mod state_at_incoming; mod upgrade_outlier_pdu; -use std::{ - collections::HashMap, - fmt::Write, - sync::{Arc, RwLock as StdRwLock}, - time::Instant, -}; +use std::{collections::HashMap, fmt::Write, sync::Arc, time::Instant}; use async_trait::async_trait; -use conduwuit::{Err, Event, PduEvent, Result, RoomVersion, Server, utils::MutexMap}; +use conduwuit::{Err, Event, PduEvent, Result, RoomVersion, Server, SyncRwLock, utils::MutexMap}; use ruma::{ OwnedEventId, OwnedRoomId, RoomId, RoomVersionId, events::room::create::RoomCreateEventContent, @@ -28,7 +23,7 @@ use crate::{Dep, globals, rooms, sending, server_keys}; pub struct Service { pub mutex_federation: RoomMutexMap, - pub federation_handletime: StdRwLock, + pub federation_handletime: SyncRwLock, services: Services, } @@ -81,11 +76,7 @@ impl crate::Service for Service { let mutex_federation = self.mutex_federation.len(); writeln!(out, "federation_mutex: {mutex_federation}")?; - let federation_handletime = self - .federation_handletime - .read() - .expect("locked for reading") - .len(); + let federation_handletime = self.federation_handletime.read().len(); writeln!(out, "federation_handletime: {federation_handletime}")?; Ok(()) diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 9429be79..e9845fbf 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,13 +1,10 @@ mod update; mod via; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; +use std::{collections::HashMap, sync::Arc}; use conduwuit::{ - Result, implement, + Result, SyncRwLock, implement, result::LogErr, utils::{ReadyExt, stream::TryIgnore}, warn, @@ -54,14 +51,14 @@ struct Data { userroomid_knockedstate: Arc, } -type AppServiceInRoomCache = RwLock>>; +type AppServiceInRoomCache = SyncRwLock>>; type StrippedStateEventItem = (OwnedRoomId, Vec>); type SyncStateEventItem = (OwnedRoomId, Vec>); impl crate::Service for Service { fn build(args: crate::Args<'_>) -> Result> { Ok(Arc::new(Self { - appservice_in_room_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: SyncRwLock::new(HashMap::new()), services: Services { account_data: args.depend::("account_data"), config: args.depend::("config"), @@ -99,7 +96,6 @@ pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &Registrati if let Some(cached) = self .appservice_in_room_cache .read() - .expect("locked") .get(room_id) .and_then(|map| map.get(&appservice.registration.id)) .copied() @@ -124,7 +120,6 @@ pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &Registrati self.appservice_in_room_cache .write() - .expect("locked") .entry(room_id.into()) .or_default() .insert(appservice.registration.id.clone(), in_room); @@ -134,19 +129,14 @@ pub async fn appservice_in_room(&self, room_id: &RoomId, appservice: &Registrati #[implement(Service)] pub fn get_appservice_in_room_cache_usage(&self) -> (usize, usize) { - let cache = self.appservice_in_room_cache.read().expect("locked"); + let cache = self.appservice_in_room_cache.read(); (cache.len(), cache.capacity()) } #[implement(Service)] #[tracing::instrument(level = "debug", skip_all)] -pub fn 
clear_appservice_in_room_cache(&self) { - self.appservice_in_room_cache - .write() - .expect("locked") - .clear(); -} +pub fn clear_appservice_in_room_cache(&self) { self.appservice_in_room_cache.write().clear(); } /// Returns an iterator of all servers participating in this room. #[implement(Service)] diff --git a/src/service/rooms/state_cache/update.rs b/src/service/rooms/state_cache/update.rs index 02c6bec6..32c67947 100644 --- a/src/service/rooms/state_cache/update.rs +++ b/src/service/rooms/state_cache/update.rs @@ -211,10 +211,7 @@ pub async fn update_joined_count(&self, room_id: &RoomId) { self.db.serverroomids.put_raw(serverroom_id, []); } - self.appservice_in_room_cache - .write() - .expect("locked") - .remove(room_id); + self.appservice_in_room_cache.write().remove(room_id); } /// Direct DB function to directly mark a user as joined. It is not From b635e825d2ba002170ff1c25a26270f875699ca5 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 22:30:41 +0100 Subject: [PATCH 31/49] refactor: Implement with_lock for lock_api --- Cargo.lock | 1 + Cargo.toml | 4 ++-- src/core/Cargo.toml | 1 + src/core/utils/with_lock.rs | 26 +++++++++++++++++++++++++- 4 files changed, 29 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b084f72a..ed9be6d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -963,6 +963,7 @@ dependencies = [ "itertools 0.14.0", "libc", "libloading", + "lock_api", "log", "maplit", "nix", diff --git a/Cargo.toml b/Cargo.toml index 3e52c4b2..54f7ae82 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -519,8 +519,8 @@ version = "1.0" version = "0.12.4" # Use this when extending with_lock::WithLock to parking_lot -# [workspace.dependencies.lock_api] -# version = "0.4.13" +[workspace.dependencies.lock_api] +version = "0.4.13" [workspace.dependencies.bytesize] version = "2.0" diff --git a/src/core/Cargo.toml b/src/core/Cargo.toml index 7a3721d6..462b8e54 100644 --- a/src/core/Cargo.toml +++ b/src/core/Cargo.toml @@ -111,6 +111,7 @@ tracing-subscriber.workspace = true tracing.workspace = true url.workspace = true parking_lot.workspace = true +lock_api.workspace = true [target.'cfg(unix)'.dependencies] nix.workspace = true diff --git a/src/core/utils/with_lock.rs b/src/core/utils/with_lock.rs index 76f014d1..914749de 100644 --- a/src/core/utils/with_lock.rs +++ b/src/core/utils/with_lock.rs @@ -2,7 +2,7 @@ use std::sync::{Arc, Mutex}; -pub trait WithLock { +pub trait WithLock { /// Acquires a lock and executes the given closure with the locked data. fn with_lock(&self, f: F) where @@ -33,6 +33,30 @@ impl WithLock for Arc> { } } +impl WithLock for lock_api::Mutex { + fn with_lock(&self, mut f: F) + where + F: FnMut(&mut T), + { + // The locking and unlocking logic is hidden inside this function. + let mut data_guard = self.lock(); + f(&mut data_guard); + // Lock is released here when `data_guard` goes out of scope. + } +} + +impl WithLock for Arc> { + fn with_lock(&self, mut f: F) + where + F: FnMut(&mut T), + { + // The locking and unlocking logic is hidden inside this function. + let mut data_guard = self.lock(); + f(&mut data_guard); + // Lock is released here when `data_guard` goes out of scope. + } +} + pub trait WithLockAsync { /// Acquires a lock and executes the given closure with the locked data. 
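// Usage sketch for the lock_api impls added above (assumed call sites, not
// from the patch): with_lock scopes the guard to the closure, so it cannot
// accidentally outlive a statement or match arm, and the same call shape now
// works for std, parking_lot, and Arc-wrapped mutexes alike.
use parking_lot::Mutex;

use conduwuit::utils::with_lock::WithLock; // import path assumed

fn main() {
    let queue = Mutex::new(Vec::new());
    queue.with_lock(|q| q.push("event"));
    // Guard already dropped here, so locking again cannot self-deadlock.
    queue.with_lock(|q| assert_eq!(q.len(), 1));
}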
fn with_lock(&self, f: F) -> impl Future From 1c985c59f57579b0be1437e711bc939954da99c1 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 23:30:31 +0100 Subject: [PATCH 32/49] refactor: Allow with_lock to return data and take an async closure --- src/core/utils/with_lock.rs | 173 ++++++++++++++++++++++++++++++------ 1 file changed, 148 insertions(+), 25 deletions(-) diff --git a/src/core/utils/with_lock.rs b/src/core/utils/with_lock.rs index 914749de..91e8e8d1 100644 --- a/src/core/utils/with_lock.rs +++ b/src/core/utils/with_lock.rs @@ -1,89 +1,212 @@ //! Traits for explicitly scoping the lifetime of locks. -use std::sync::{Arc, Mutex}; +use std::{ + future::Future, + sync::{Arc, Mutex}, +}; pub trait WithLock { - /// Acquires a lock and executes the given closure with the locked data. - fn with_lock(&self, f: F) + /// Acquires a lock and executes the given closure with the locked data, + /// returning the result. + fn with_lock(&self, f: F) -> R where - F: FnMut(&mut T); + F: FnMut(&mut T) -> R; } impl WithLock for Mutex { - fn with_lock(&self, mut f: F) + fn with_lock(&self, mut f: F) -> R where - F: FnMut(&mut T), + F: FnMut(&mut T) -> R, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock().unwrap(); - f(&mut data_guard); + f(&mut data_guard) // Lock is released here when `data_guard` goes out of scope. } } impl WithLock for Arc> { - fn with_lock(&self, mut f: F) + fn with_lock(&self, mut f: F) -> R where - F: FnMut(&mut T), + F: FnMut(&mut T) -> R, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock().unwrap(); - f(&mut data_guard); + f(&mut data_guard) // Lock is released here when `data_guard` goes out of scope. } } impl WithLock for lock_api::Mutex { - fn with_lock(&self, mut f: F) + fn with_lock(&self, mut f: F) -> Ret where - F: FnMut(&mut T), + F: FnMut(&mut T) -> Ret, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock(); - f(&mut data_guard); + f(&mut data_guard) // Lock is released here when `data_guard` goes out of scope. } } impl WithLock for Arc> { - fn with_lock(&self, mut f: F) + fn with_lock(&self, mut f: F) -> Ret where - F: FnMut(&mut T), + F: FnMut(&mut T) -> Ret, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock(); - f(&mut data_guard); + f(&mut data_guard) // Lock is released here when `data_guard` goes out of scope. } } pub trait WithLockAsync { - /// Acquires a lock and executes the given closure with the locked data. - fn with_lock(&self, f: F) -> impl Future + /// Acquires a lock and executes the given closure with the locked data, + /// returning the result. + fn with_lock(&self, f: F) -> impl Future where - F: FnMut(&mut T); + F: FnMut(&mut T) -> R; + + /// Acquires a lock and executes the given async closure with the locked + /// data. + fn with_lock_async(&self, f: F) -> impl std::future::Future + where + F: AsyncFnMut(&mut T) -> R; } impl WithLockAsync for futures::lock::Mutex { - async fn with_lock(&self, mut f: F) + async fn with_lock(&self, mut f: F) -> R where - F: FnMut(&mut T), + F: FnMut(&mut T) -> R, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock().await; - f(&mut data_guard); + f(&mut data_guard) + // Lock is released here when `data_guard` goes out of scope. 
+ } + + async fn with_lock_async(&self, mut f: F) -> R + where + F: AsyncFnMut(&mut T) -> R, + { + // The locking and unlocking logic is hidden inside this function. + let mut data_guard = self.lock().await; + f(&mut data_guard).await // Lock is released here when `data_guard` goes out of scope. } } impl WithLockAsync for Arc> { - async fn with_lock(&self, mut f: F) + async fn with_lock(&self, mut f: F) -> R where - F: FnMut(&mut T), + F: FnMut(&mut T) -> R, { // The locking and unlocking logic is hidden inside this function. let mut data_guard = self.lock().await; - f(&mut data_guard); + f(&mut data_guard) + // Lock is released here when `data_guard` goes out of scope. + } + + async fn with_lock_async(&self, mut f: F) -> R + where + F: AsyncFnMut(&mut T) -> R, + { + // The locking and unlocking logic is hidden inside this function. + let mut data_guard = self.lock().await; + f(&mut data_guard).await // Lock is released here when `data_guard` goes out of scope. } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_with_lock_return_value() { + let mutex = Mutex::new(5); + let result = mutex.with_lock(|v| { + *v += 1; + *v * 2 + }); + assert_eq!(result, 12); + let value = mutex.lock().unwrap(); + assert_eq!(*value, 6); + } + + #[test] + fn test_with_lock_unit_return() { + let mutex = Mutex::new(10); + mutex.with_lock(|v| { + *v += 2; + }); + let value = mutex.lock().unwrap(); + assert_eq!(*value, 12); + } + + #[test] + fn test_with_lock_arc_mutex() { + let mutex = Arc::new(Mutex::new(1)); + let result = mutex.with_lock(|v| { + *v *= 10; + *v + }); + assert_eq!(result, 10); + assert_eq!(*mutex.lock().unwrap(), 10); + } + + #[tokio::test] + async fn test_with_lock_async_return_value() { + use futures::lock::Mutex as AsyncMutex; + let mutex = AsyncMutex::new(7); + let result = mutex + .with_lock(|v| { + *v += 3; + *v * 2 + }) + .await; + assert_eq!(result, 20); + let value = mutex.lock().await; + assert_eq!(*value, 10); + } + + #[tokio::test] + async fn test_with_lock_async_unit_return() { + use futures::lock::Mutex as AsyncMutex; + let mutex = AsyncMutex::new(100); + mutex + .with_lock(|v| { + *v -= 50; + }) + .await; + let value = mutex.lock().await; + assert_eq!(*value, 50); + } + + #[tokio::test] + async fn test_with_lock_async_closure() { + use futures::lock::Mutex as AsyncMutex; + let mutex = AsyncMutex::new(1); + mutex + .with_lock_async(async |v| { + *v += 9; + }) + .await; + let value = mutex.lock().await; + assert_eq!(*value, 10); + } + + #[tokio::test] + async fn test_with_lock_async_arc_mutex() { + use futures::lock::Mutex as AsyncMutex; + let mutex = Arc::new(AsyncMutex::new(2)); + mutex + .with_lock_async(async |v: &mut i32| { + *v *= 5; + }) + .await; + let value = mutex.lock().await; + assert_eq!(*value, 10); + } +} From f593cac58aa48a7be73a87d5f53a0b7a5e41dcd8 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 23:32:18 +0100 Subject: [PATCH 33/49] feat: Enable hardware-lock-elision and deadlock_detection --- Cargo.lock | 29 +++++++++++++++++++++++++++++ Cargo.toml | 1 + 2 files changed, 30 insertions(+) diff --git a/Cargo.lock b/Cargo.lock index ed9be6d9..5dce9c59 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1659,6 +1659,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fixedbitset" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" + [[package]] name = "flate2" version = "1.1.2" @@ -3220,10 +3226,13 @@ version = "0.9.11" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ + "backtrace", "cfg-if", "libc", + "petgraph", "redox_syscall", "smallvec", + "thread-id", "windows-targets 0.52.6", ] @@ -3273,6 +3282,16 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "petgraph" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +dependencies = [ + "fixedbitset", + "indexmap 2.9.0", +] + [[package]] name = "phf" version = "0.11.3" @@ -4894,6 +4913,16 @@ dependencies = [ "syn", ] +[[package]] +name = "thread-id" +version = "4.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe8f25bbdd100db7e1d34acf7fd2dc59c4bf8f7483f505eaa7d4f12f76cc0ea" +dependencies = [ + "libc", + "winapi", +] + [[package]] name = "thread_local" version = "1.1.9" diff --git a/Cargo.toml b/Cargo.toml index 54f7ae82..ab6a9e8a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -517,6 +517,7 @@ version = "1.0" [workspace.dependencies.parking_lot] version = "0.12.4" +features = ["hardware-lock-elision", "deadlock_detection"] # TODO: Check if deadlock_detection has a perf impact, if it does only enable with debug_assertions # Use this when extending with_lock::WithLock to parking_lot [workspace.dependencies.lock_api] From 95610499c7df2d6d1ebe16650431673369975a54 Mon Sep 17 00:00:00 2001 From: Jade Ellis Date: Sat, 19 Jul 2025 23:32:53 +0100 Subject: [PATCH 34/49] chore: Disable direnv's nix flake interfering with cargo cache --- .envrc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.envrc b/.envrc index 952ec2f8..bad73b75 100644 --- a/.envrc +++ b/.envrc @@ -2,6 +2,6 @@ dotenv_if_exists -use flake ".#${DIRENV_DEVSHELL:-default}" +# use flake ".#${DIRENV_DEVSHELL:-default}" PATH_add bin From 9dbae62bec799677e45b7cbda275add80b63fe48 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Fri, 25 Apr 2025 21:06:00 -0700 Subject: [PATCH 35/49] probably incorrectly delete support for non-standardized matrix srv record --- src/service/resolver/actual.rs | 37 ++++++++++++++++------------------ 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs index d23ef95a..52cd5d7d 100644 --- a/src/service/resolver/actual.rs +++ b/src/service/resolver/actual.rs @@ -306,28 +306,25 @@ impl super::Service { #[tracing::instrument(name = "srv", level = "debug", skip(self))] async fn query_srv_record(&self, hostname: &'_ str) -> Result> { - let hostnames = - [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")]; + self.services.server.check_running()?; - for hostname in hostnames { - self.services.server.check_running()?; + debug!("querying SRV for {hostname:?}"); - debug!("querying SRV for {hostname:?}"); - let hostname = hostname.trim_end_matches('.'); - match self.resolver.resolver.srv_lookup(hostname).await { - | Err(e) => Self::handle_resolve_error(&e, hostname)?, - | Ok(result) => { - return Ok(result.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()) - .as_str() - .try_into() - .unwrap_or_else(|_| FedDest::default_port()), - ) - })); - }, - } + let hostname_suffix = 
format!("_matrix-fed._tcp.{hostname}."); + let hostname = hostname_suffix.trim_end_matches('.'); + match self.resolver.resolver.srv_lookup(hostname).await { + | Err(e) => Self::handle_resolve_error(&e, hostname)?, + | Ok(result) => { + return Ok(result.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()) + .as_str() + .try_into() + .unwrap_or_else(|_| FedDest::default_port()), + ) + })); + }, } Ok(None) From 359a10eca49909ee20ce81fd45bbf56beb9620cc Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Wed, 14 May 2025 06:53:00 -0700 Subject: [PATCH 36/49] bump the number of allowed immutable memtables by 1, to allow for greater flood protection this should probably not be applied if you have rocksdb_atomic_flush = false (the default) --- src/database/engine/cf_opts.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs index 58358f02..b8d0b307 100644 --- a/src/database/engine/cf_opts.rs +++ b/src/database/engine/cf_opts.rs @@ -29,7 +29,7 @@ fn descriptor_cf_options( set_table_options(&mut opts, &desc, cache)?; opts.set_min_write_buffer_number(1); - opts.set_max_write_buffer_number(2); + opts.set_max_write_buffer_number(3); opts.set_write_buffer_size(desc.write_size); opts.set_target_file_size_base(desc.file_size); From 5a9200ee1f18ba25197dd382779f46877de2d3a4 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Sat, 21 Jun 2025 08:02:05 -0700 Subject: [PATCH 37/49] upgrade some settings to enable 5g in continuwuity enable converged 6g at the edge in continuwuity better stateinfo_cache_capacity default better roomid_spacehierarchy_cache_capacity make sender workers default better and clamp value to core count update sender workers documentation add more parallelism_scaled and make them public update 1 document --- conduwuit-example.toml | 6 +-- src/core/config/mod.rs | 75 +++++++++++++++++++------------------- src/service/sending/mod.rs | 12 ++---- 3 files changed, 43 insertions(+), 50 deletions(-) diff --git a/conduwuit-example.toml b/conduwuit-example.toml index 2fab9cdf..0576cb0d 100644 --- a/conduwuit-example.toml +++ b/conduwuit-example.toml @@ -1664,11 +1664,9 @@ #stream_amplification = 1024 # Number of sender task workers; determines sender parallelism. Default is -# '0' which means the value is determined internally, likely matching the -# number of tokio worker-threads or number of cores, etc. Override by -# setting a non-zero value. +# number of CPU cores. Override by setting a different value. # -#sender_workers = 0 +#sender_workers = 4 # Enables listener sockets; can be set to false to disable listening. This # option is intended for developer/diagnostic purposes only. diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 909462db..5eee80c1 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1900,12 +1900,10 @@ pub struct Config { pub stream_amplification: usize, /// Number of sender task workers; determines sender parallelism. Default is - /// '0' which means the value is determined internally, likely matching the - /// number of tokio worker-threads or number of cores, etc. Override by - /// setting a non-zero value. + /// '4'. Override by setting a different value. Values clamped 1 to core count. 
/// - /// default: 0 - #[serde(default)] + /// default: 4 + #[serde(default = "default_sender_workers")] pub sender_workers: usize, /// Enables listener sockets; can be set to false to disable listening. This @@ -2136,45 +2134,47 @@ fn default_database_backups_to_keep() -> i16 { 1 } fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) } -fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) } +fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) } -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) } +fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) } fn default_cache_capacity_modifier() -> f64 { 1.0 } fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) + parallelism_scaled_u32(50_000).saturating_add(500_000) } fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(100_000) -} - -fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(25_000).saturating_add(100_000) -} - -fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(10_000).saturating_add(100_000) -} - -fn default_servernameevent_data_cache_capacity() -> u32 { parallelism_scaled_u32(100_000).saturating_add(500_000) } -fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) } +fn default_eventidshort_cache_capacity() -> u32 { + parallelism_scaled_u32(100_000).saturating_add(500_000) +} -fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) } +fn default_eventid_pdu_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} -fn default_dns_cache_entries() -> u32 { 32768 } +fn default_shortstatekey_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} + +fn default_statekeyshort_cache_capacity() -> u32 { + parallelism_scaled_u32(50_000).saturating_add(500_000) +} + +fn default_servernameevent_data_cache_capacity() -> u32 { + parallelism_scaled_u32(200_000).saturating_add(500_000) +} + +fn default_stateinfo_cache_capacity() -> u32 { + parallelism_scaled_u32(500).clamp(100, 12000) } + +fn default_roomid_spacehierarchy_cache_capacity() -> u32 { + parallelism_scaled_u32(500).clamp(100, 12000) } + +fn default_dns_cache_entries() -> u32 { 327680 } fn default_dns_min_ttl() -> u64 { 60 * 180 } @@ -2365,14 +2365,13 @@ fn default_admin_log_capture() -> String { fn default_admin_room_tag() -> String { "m.server_notice".to_owned() } #[allow(clippy::as_conversions, clippy::cast_precision_loss)] -fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) } +pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) } -fn parallelism_scaled_u32(val: u32) -> u32 { - let val = val.try_into().expect("failed to cast u32 to usize"); - parallelism_scaled(val).try_into().unwrap_or(u32::MAX) -} +pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) } -fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } +pub fn parallelism_scaled_i32(val: i32) -> i32 { 
val.saturating_mul(sys::available_parallelism() as i32) } + +pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) } fn default_trusted_server_batch_size() -> usize { 256 } @@ -2392,6 +2391,8 @@ fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } +fn default_sender_workers() -> usize { 4 } + fn default_client_receive_timeout() -> u64 { 75 } fn default_client_request_timeout() -> u64 { 180 } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 08ca7010..ce687551 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -401,16 +401,10 @@ impl Service { fn num_senders(args: &crate::Args<'_>) -> usize { const MIN_SENDERS: usize = 1; - // Limit the number of senders to the number of workers threads or number of - // cores, conservatively. - let max_senders = args - .server - .metrics - .num_workers() - .min(available_parallelism()); + // Limit the maximum number of senders to the number of cores. + let max_senders = available_parallelism(); - // If the user doesn't override the default 0, this is intended to then default - // to 1 for now as multiple senders is experimental. + // default is 4 senders. clamp between 1 and core count. args.server .config .sender_workers From 4a58618d9d1d8b63c63b66adb5e8f593f0fd8fdc Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 7 Jun 2025 00:46:55 +0100 Subject: [PATCH 38/49] fix an auth rule not applying correctly --- src/core/matrix/state_res/event_auth.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 5c36ce03..0b5b72d7 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -255,6 +255,16 @@ where }, | Some(e) => e, }; + // just re-check 1.2 to work around a bug + let Some(room_id_server_name) = incoming_event.room_id().server_name() else { + warn!("room ID has no servername"); + return Ok(false); + }; + + if room_id_server_name != sender.server_name() { + warn!("servername of room ID does not match servername of m.room.create sender"); + return Ok(false); + } if incoming_event.room_id() != room_create_event.room_id() { warn!("room_id of incoming event does not match room_id of m.room.create event"); From 4aa47d383fb85fba05945e2212760c123080056b Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Sat, 7 Jun 2025 00:55:03 +0100 Subject: [PATCH 39/49] Note about ruma#2064 in TODO --- src/core/matrix/state_res/event_auth.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 0b5b72d7..40c32e03 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -217,8 +217,9 @@ where } /* - // TODO: In the past this code caused problems federating with synapse, maybe this has been - // resolved already. Needs testing. + // TODO: In the past this code was commented as it caused problems with Synapse. This is no + // longer the case. This needs to be implemented. + // See also: https://github.com/ruma/ruma/pull/2064 // // 2. Reject if auth_events // a. 
auth_events cannot have duplicate keys since it's a BTree From f528360eb1281830cf8e1766dce7765d79cf2762 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 10 Jun 2025 22:33:31 +0100 Subject: [PATCH 40/49] Kick up a fuss when m.room.create is unfindable --- src/core/matrix/state_res/event_auth.rs | 4 ++-- src/core/matrix/state_res/mod.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 40c32e03..31c660ed 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -30,7 +30,7 @@ use super::{ }, room_version::RoomVersion, }; -use crate::{debug, error, trace, warn}; +use crate::{debug, err_log, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -251,7 +251,7 @@ where let room_create_event = match room_create_event { | None => { - warn!("no m.room.create event in auth chain"); + error!("no m.room.create event in auth chain for {}!", incoming_event.event_id()); return Ok(false); }, | Some(e) => e, diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs index ce9d9276..e721e14c 100644 --- a/src/core/matrix/state_res/mod.rs +++ b/src/core/matrix/state_res/mod.rs @@ -753,7 +753,7 @@ where } } } - // Did not find a power level event so we default to zero + warn!("could not find a power event in the mainline map, defaulting to zero depth"); Ok(0) } From ba4545d7ffc725b0bb7c8ad6236b7a61bf7c5ae5 Mon Sep 17 00:00:00 2001 From: nexy7574 Date: Tue, 10 Jun 2025 23:00:09 +0100 Subject: [PATCH 41/49] Fix room ID check --- src/core/matrix/state_res/event_auth.rs | 11 +++++++---- src/service/rooms/event_handler/handle_outlier_pdu.rs | 5 +---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs index 31c660ed..de4d20e1 100644 --- a/src/core/matrix/state_res/event_auth.rs +++ b/src/core/matrix/state_res/event_auth.rs @@ -30,7 +30,7 @@ use super::{ }, room_version::RoomVersion, }; -use crate::{debug, err_log, error, trace, warn}; +use crate::{debug, error, trace, warn}; // FIXME: field extracting could be bundled for `content` #[derive(Deserialize)] @@ -251,7 +251,7 @@ where let room_create_event = match room_create_event { | None => { - error!("no m.room.create event in auth chain for {}!", incoming_event.event_id()); + error!("no m.room.create event found for {}!", incoming_event.event_id()); return Ok(false); }, | Some(e) => e, @@ -262,8 +262,11 @@ where return Ok(false); }; - if room_id_server_name != sender.server_name() { - warn!("servername of room ID does not match servername of m.room.create sender"); + if room_id_server_name != room_create_event.sender().server_name() { + warn!( + "servername of room ID origin ({}) does not match servername of m.room.create sender ({})", + room_id_server_name, + room_create_event.sender().server_name()); return Ok(false); } diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs index d79eed77..fad9ac74 100644 --- a/src/service/rooms/event_handler/handle_outlier_pdu.rs +++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs @@ -122,10 +122,7 @@ where } // The original create event must be in the auth events - if !matches!( - auth_events.get(&(StateEventType::RoomCreate, String::new().into())), - Some(_) | None - ) { + if !auth_events.contains_key(&(StateEventType::RoomCreate, 
String::new().into())) {
 		return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
 	}
 
From 2a7557cd86e99e3e50f8a8d3bb5aa8ff4dcd3a56 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:27:25 +0100
Subject: [PATCH 42/49] more logs

---
 src/core/matrix/state_res/event_auth.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index de4d20e1..fc1119de 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -13,6 +13,7 @@ use ruma::{
 		power_levels::RoomPowerLevelsEventContent,
 		third_party_invite::RoomThirdPartyInviteEventContent,
 	},
+	EventId,
 	int,
 	serde::{Base64, Raw},
 };
@@ -21,7 +22,6 @@ use serde::{
 	de::{Error as _, IgnoredAny},
 };
 use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue};
-
 use super::{
 	Error, Event, Result, StateEventType, StateKey, TimelineEventType,
 	power_levels::{
@@ -251,7 +251,14 @@ where
 
 	let room_create_event = match room_create_event {
 		| None => {
-			error!("no m.room.create event found for {}!", incoming_event.event_id());
+			error!(
+				create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+				"no m.room.create event found for {} ({})!",
+				incoming_event.event_id().as_str(),
+				incoming_event.room_id().as_str()
+			);
 			return Ok(false);
 		},
 		| Some(e) => e,

From cd2db47fdea11df0c9648377cfcccf62980160c2 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:42:19 +0100
Subject: [PATCH 43/49] log which room struggled to get mainline depth

---
 src/core/matrix/state_res/mod.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs
index e721e14c..ba9c013d 100644
--- a/src/core/matrix/state_res/mod.rs
+++ b/src/core/matrix/state_res/mod.rs
@@ -733,8 +733,12 @@ where
 	Fut: Future<Output = Option<E>> + Send,
 	E: Event + Send + Sync,
 {
+	let mut room_id = None;
 	while let Some(sort_ev) = event {
 		debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+		if room_id.is_none() {
+			room_id = Some(sort_ev.room_id().to_owned());
+		}
 
 		let id = sort_ev.event_id();
 		if let Some(depth) = mainline_map.get(id) {
@@ -753,7 +757,7 @@ where
 			}
 		}
 	}
-	warn!("could not find a power event in the mainline map, defaulting to zero depth");
+	warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth");
 	Ok(0)
 }

From 24440c1d990f38b7a8bed6555dd49f513d15cbc2 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:02:49 -0700
Subject: [PATCH 44/49] change rocksdb stats level to 3

scale rocksdb background jobs and subcompactions
change rocksdb default log level from error to info
delete unused num_threads function
fix warns from cargo

---
 conduwuit-example.toml         |  2 +-
 src/core/config/mod.rs         |  6 +++---
 src/database/engine/db_opts.rs | 22 ++++------------------
 3 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 0576cb0d..f3432e4d 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -1050,7 +1050,7 @@
 # 3 to 5 = Statistics with possible performance impact.
# 6 = All statistics. # -#rocksdb_stats_level = 1 +#rocksdb_stats_level = 3 # This is a password that can be configured that will let you login to the # server bot account (currently `@conduit`) for emergency troubleshooting diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index 5eee80c1..6ae7e6af 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -1218,7 +1218,7 @@ pub struct Config { /// 3 to 5 = Statistics with possible performance impact. /// 6 = All statistics. /// - /// default: 1 + /// default: 3 #[serde(default = "default_rocksdb_stats_level")] pub rocksdb_stats_level: u8, @@ -2278,7 +2278,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 } fn default_rocksdb_recovery_mode() -> u8 { 1 } -fn default_rocksdb_log_level() -> String { "error".to_owned() } +fn default_rocksdb_log_level() -> String { "info".to_owned() } fn default_rocksdb_log_time_to_roll() -> usize { 0 } @@ -2310,7 +2310,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 } #[allow(clippy::doc_markdown)] fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 } -fn default_rocksdb_stats_level() -> u8 { 1 } +fn default_rocksdb_stats_level() -> u8 { 3 } // I know, it's a great name #[must_use] diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs index 18cec742..1299443d 100644 --- a/src/database/engine/db_opts.rs +++ b/src/database/engine/db_opts.rs @@ -1,8 +1,6 @@ -use std::{cmp, convert::TryFrom}; - -use conduwuit::{Config, Result, utils}; +use conduwuit::{Config, Result}; use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel}; - +use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32}; use super::{cf_opts::cache_size_f64, logger::handle as handle_log}; /// Create database-wide options suitable for opening the database. 
This also
@@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Result<Options> {
 	set_logging_defaults(&mut opts, config);
 
 	// Processing
-	opts.set_max_background_jobs(num_threads::<i32>(config)?);
-	opts.set_max_subcompactions(num_threads::<u32>(config)?);
+	opts.set_max_background_jobs(parallelism_scaled_i32(1));
+	opts.set_max_subcompactions(parallelism_scaled_u32(1));
 	opts.set_avoid_unnecessary_blocking_io(true);
 
 	opts.set_max_file_opening_threads(0);
@@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
 		opts.set_callback_logger(rocksdb_log_level, &handle_log);
 	}
 }
-
-fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> {
-	const MIN_PARALLELISM: usize = 2;
-
-	let requested = if config.rocksdb_parallelism_threads != 0 {
-		config.rocksdb_parallelism_threads
-	} else {
-		utils::available_parallelism()
-	};
-
-	utils::math::try_into::<T, _>(cmp::max(MIN_PARALLELISM, requested))
-}

From addea03a242a3d1340ff2b426740a8c4954fe6f8 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 18 Jun 2025 12:48:27 -0700
Subject: [PATCH 45/49] make fetching key room events less smart

---
 src/core/matrix/state_res/event_auth.rs | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index fc1119de..ec70d684 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -242,12 +242,16 @@ where
 	}
 	*/
 
-	let (room_create_event, power_levels_event, sender_member_event) = join3(
-		fetch_state(&StateEventType::RoomCreate, ""),
-		fetch_state(&StateEventType::RoomPowerLevels, ""),
-		fetch_state(&StateEventType::RoomMember, sender.as_str()),
-	)
-	.await;
+	// let (room_create_event, power_levels_event, sender_member_event) = join3(
+	// 	fetch_state(&StateEventType::RoomCreate, ""),
+	// 	fetch_state(&StateEventType::RoomPowerLevels, ""),
+	// 	fetch_state(&StateEventType::RoomMember, sender.as_str()),
+	// )
+	// .await;
+
+	let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
+	let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
+	let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;
 
 	let room_create_event = match room_create_event {
 		| None => {

From 2910ac781f6e4fb2ace5a8cebe7505569ead11d2 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:39:10 -0700
Subject: [PATCH 46/49] lock the getter instead ???
c/o M

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 +-
 src/service/rooms/state/mod.rs                         | 1 +
 src/service/rooms/timeline/create.rs                   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 4093cb05..05f88849 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -149,7 +149,7 @@ where
 	let extremities: Vec<_> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, &state_lock)
 		.map(ToOwned::to_owned)
 		.ready_filter(|event_id| {
 			// Remove any that are referenced by this incoming event's prev_events
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 641aa6a9..92881126 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -388,6 +388,7 @@ impl Service {
 	pub fn get_forward_extremities<'a>(
 		&'a self,
 		room_id: &'a RoomId,
+		_state_lock: &'a RoomMutexGuard,
 	) -> impl Stream<Item = &EventId> + Send + '_ {
 		let prefix = (room_id, Interfix);
 
diff --git a/src/service/rooms/timeline/create.rs b/src/service/rooms/timeline/create.rs
index 20ccaf56..1be2f58b 100644
--- a/src/service/rooms/timeline/create.rs
+++ b/src/service/rooms/timeline/create.rs
@@ -42,7 +42,7 @@ pub async fn create_hash_and_sign_event(
 	let prev_events: Vec<OwnedEventId> = self
 		.services
 		.state
-		.get_forward_extremities(room_id)
+		.get_forward_extremities(room_id, _mutex_lock)
 		.take(20)
 		.map(Into::into)
 		.collect()

From 77c7df6828946b8431fd66a501349e869f834f11 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:44:27 -0700
Subject: [PATCH 47/49] vehicle loan documentation now available at window 7

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 05f88849..bc2408df 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -6,6 +6,7 @@ use conduwuit::{
 	trace,
 	utils::stream::{BroadbandExt, ReadyExt},
 	warn,
+	info,
 };
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@@ -167,6 +168,8 @@ where
 		.collect()
 		.await;
 
+	if extremities.is_empty() { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
+
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
 		extremities.len(),

From 1f2d3b126e8ef8e295b32403856aef357db329c7 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:13:30 -0700
Subject: [PATCH 48/49] sender_workers scaling. this time, with feeling!

---
 src/core/config/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 6ae7e6af..b23ad39d 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1900,9 +1900,9 @@ pub struct Config {
 	pub stream_amplification: usize,
 
 	/// Number of sender task workers; determines sender parallelism. Default is
-	/// '4'. Override by setting a different value. Value is clamped between 1 and the core count.
+	/// core count. Override by setting a different value.
/// - /// default: 4 + /// default: core count #[serde(default = "default_sender_workers")] pub sender_workers: usize, @@ -2391,7 +2391,7 @@ fn default_stream_width_scale() -> f32 { 1.0 } fn default_stream_amplification() -> usize { 1024 } -fn default_sender_workers() -> usize { 4 } +fn default_sender_workers() -> usize { parallelism_scaled(1) } fn default_client_receive_timeout() -> u64 { 75 } From b882dbe616c70583756951414353530cf0d58068 Mon Sep 17 00:00:00 2001 From: Jacob Taylor Date: Mon, 30 Jun 2025 15:25:11 -0700 Subject: [PATCH 49/49] more funny settings (part 3 of 12) --- src/core/config/mod.rs | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs index b23ad39d..b0317d00 100644 --- a/src/core/config/mod.rs +++ b/src/core/config/mod.rs @@ -2136,40 +2136,41 @@ fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64( fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) } -fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) } +fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_cache_capacity_modifier() -> f64 { 1.0 } fn default_auth_chain_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shorteventid_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_eventidshort_cache_capacity() -> u32 { - parallelism_scaled_u32(100_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_eventid_pdu_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_shortstatekey_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_statekeyshort_cache_capacity() -> u32 { - parallelism_scaled_u32(50_000).saturating_add(500_000) + parallelism_scaled_u32(50_000).saturating_add(100_000) } fn default_servernameevent_data_cache_capacity() -> u32 { - parallelism_scaled_u32(200_000).saturating_add(500_000) + parallelism_scaled_u32(100_000).saturating_add(100_000) } fn default_stateinfo_cache_capacity() -> u32 { - parallelism_scaled_u32(500).clamp(100, 12000) } + parallelism_scaled_u32(500).clamp(100, 12000) +} fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(500).clamp(100, 12000) }
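
For reference, the later patches in this series all lean on one sizing idiom: scale a per-core base value by available_parallelism(), then either saturating_add a flat floor or clamp the result into a fixed range. Below is a minimal standalone sketch of that idiom, assuming std::thread::available_parallelism() in place of the crate-internal sys::available_parallelism(); the constants mirror PATCH 49, while the main() driver and the local helper wrapper are purely illustrative.

	// Standalone sketch of the parallelism-scaled defaults from PATCHes 37/48/49.
	// Assumption: std::thread::available_parallelism() stands in for the
	// crate-internal sys::available_parallelism() helper used by the patches.
	use std::thread;

	fn available_parallelism() -> usize {
		// Fall back to 1 if the core count cannot be determined.
		thread::available_parallelism().map(|n| n.get()).unwrap_or(1)
	}

	fn parallelism_scaled_u32(val: u32) -> u32 {
		// Saturating multiply so an enormous core count cannot overflow the u32.
		val.saturating_mul(available_parallelism() as u32)
	}

	fn default_pdu_cache_capacity() -> u32 {
		// Per-core term plus a flat floor, as in PATCH 49.
		parallelism_scaled_u32(50_000).saturating_add(100_000)
	}

	fn default_stateinfo_cache_capacity() -> u32 {
		// Clamped rather than floored: small machines keep a useful minimum,
		// very wide machines are capped.
		parallelism_scaled_u32(500).clamp(100, 12_000)
	}

	fn main() {
		// On an 8-core machine this prints 500000 and 4000 respectively:
		// 50_000 * 8 + 100_000, and 500 * 8 clamped into 100..=12_000.
		println!("pdu cache: {}", default_pdu_cache_capacity());
		println!("stateinfo cache: {}", default_stateinfo_cache_capacity());
	}

The same shape explains the sender_workers default in PATCH 48: parallelism_scaled(1) is simply available_parallelism(), which num_senders() then clamps between MIN_SENDERS (1) and the core count, per the comment introduced in PATCH 37.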