From 35e922028471982aa709353814257ddc355f4fa5 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Fri, 25 Apr 2025 21:06:00 -0700
Subject: [PATCH 01/25] probably incorrectly delete support for non-standardized matrix srv record

---
 src/service/resolver/actual.rs | 37 ++++++++++++++++------------------
 1 file changed, 17 insertions(+), 20 deletions(-)

diff --git a/src/service/resolver/actual.rs b/src/service/resolver/actual.rs
index d23ef95a..52cd5d7d 100644
--- a/src/service/resolver/actual.rs
+++ b/src/service/resolver/actual.rs
@@ -306,28 +306,25 @@ impl super::Service {
     #[tracing::instrument(name = "srv", level = "debug", skip(self))]
     async fn query_srv_record(&self, hostname: &'_ str) -> Result<Option<FedDest>> {
-        let hostnames =
-            [format!("_matrix-fed._tcp.{hostname}."), format!("_matrix._tcp.{hostname}.")];
+        self.services.server.check_running()?;
 
-        for hostname in hostnames {
-            self.services.server.check_running()?;
+        debug!("querying SRV for {hostname:?}");
 
-            debug!("querying SRV for {hostname:?}");
-            let hostname = hostname.trim_end_matches('.');
-            match self.resolver.resolver.srv_lookup(hostname).await {
-                | Err(e) => Self::handle_resolve_error(&e, hostname)?,
-                | Ok(result) => {
-                    return Ok(result.iter().next().map(|result| {
-                        FedDest::Named(
-                            result.target().to_string().trim_end_matches('.').to_owned(),
-                            format!(":{}", result.port())
-                                .as_str()
-                                .try_into()
-                                .unwrap_or_else(|_| FedDest::default_port()),
-                        )
-                    }));
-                },
-            }
+        let hostname_suffix = format!("_matrix-fed._tcp.{hostname}.");
+        let hostname = hostname_suffix.trim_end_matches('.');
+        match self.resolver.resolver.srv_lookup(hostname).await {
+            | Err(e) => Self::handle_resolve_error(&e, hostname)?,
+            | Ok(result) => {
+                return Ok(result.iter().next().map(|result| {
+                    FedDest::Named(
+                        result.target().to_string().trim_end_matches('.').to_owned(),
+                        format!(":{}", result.port())
+                            .as_str()
+                            .try_into()
+                            .unwrap_or_else(|_| FedDest::default_port()),
+                    )
+                }));
+            },
         }
 
         Ok(None)

From d15eba2920972463148a1fafb80f6f9b4f1348b0 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 14 May 2025 06:53:00 -0700
Subject: [PATCH 02/25] bump the number of allowed immutable memtables by 1, to allow for greater flood protection

this should probably not be applied if you have rocksdb_atomic_flush =
false (the default)
---
 src/database/engine/cf_opts.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/database/engine/cf_opts.rs b/src/database/engine/cf_opts.rs
index 58358f02..b8d0b307 100644
--- a/src/database/engine/cf_opts.rs
+++ b/src/database/engine/cf_opts.rs
@@ -29,7 +29,7 @@ fn descriptor_cf_options(
     set_table_options(&mut opts, &desc, cache)?;
 
     opts.set_min_write_buffer_number(1);
-    opts.set_max_write_buffer_number(2);
+    opts.set_max_write_buffer_number(3);
     opts.set_write_buffer_size(desc.write_size);
 
     opts.set_target_file_size_base(desc.file_size);
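A standalone sketch of the knob PATCH 02 raises: RocksDB keeps one active memtable plus up to max_write_buffer_number - 1 immutable memtables awaiting flush, so going from 2 to 3 leaves room for one more immutable memtable during a write burst. Only the calls already visible in the hunk above are used; the comment describes stock RocksDB behaviour, not something this series adds.

    use rocksdb::Options;

    fn write_buffer_sketch() -> Options {
        let mut opts = Options::default();
        opts.set_min_write_buffer_number(1);
        // One active memtable plus up to two immutable memtables
        // queued for flushing before writes stall.
        opts.set_max_write_buffer_number(3);
        opts
    }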
From 0eb9db8e71e1e952a2cd45f96ae2d185c08baa2e Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:02:05 -0700
Subject: [PATCH 03/25] upgrade some settings to enable 5g in continuwuity

enable converged 6g at the edge in continuwuity

better stateinfo_cache_capacity default

better roomid_spacehierarchy_cache_capacity

make sender workers default better and clamp value to core count

update sender workers documentation

add more parallelism_scaled and make them public

update 1 document
---
 conduwuit-example.toml     |  6 +--
 src/core/config/mod.rs     | 75 +++++++++++++++++++-------------------
 src/service/sending/mod.rs | 12 ++----
 3 files changed, 43 insertions(+), 50 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 541050b1..3bd08ed2 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -1680,11 +1680,9 @@
 #stream_amplification = 1024
 
 # Number of sender task workers; determines sender parallelism. Default is
-# '0' which means the value is determined internally, likely matching the
-# number of tokio worker-threads or number of cores, etc. Override by
-# setting a non-zero value.
+# number of CPU cores. Override by setting a different value.
 #
-#sender_workers = 0
+#sender_workers = 4
 
 # Enables listener sockets; can be set to false to disable listening. This
 # option is intended for developer/diagnostic purposes only.
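The clamping rule from this patch's commit message ("clamp value to core count") reduces to ordinary integer clamping; a self-contained sketch with assumed core counts (the real code reads them at runtime, see the num_senders() hunk further down):

    fn effective_senders(configured: usize, cores: usize) -> usize {
        const MIN_SENDERS: usize = 1;
        configured.clamp(MIN_SENDERS, cores)
    }

    fn main() {
        assert_eq!(effective_senders(4, 16), 4);   // default of 4 kept
        assert_eq!(effective_senders(64, 16), 16); // clamped to core count
        assert_eq!(effective_senders(0, 16), 1);   // floor of one worker
    }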
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index aa021be7..336e6e48 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1917,12 +1917,10 @@ pub struct Config {
     pub stream_amplification: usize,
 
     /// Number of sender task workers; determines sender parallelism. Default is
-    /// '0' which means the value is determined internally, likely matching the
-    /// number of tokio worker-threads or number of cores, etc. Override by
-    /// setting a non-zero value.
+    /// '4'. Override by setting a different value. Values clamped 1 to core count.
     ///
-    /// default: 0
-    #[serde(default)]
+    /// default: 4
+    #[serde(default = "default_sender_workers")]
     pub sender_workers: usize,
 
     /// Enables listener sockets; can be set to false to disable listening. This
@@ -2153,45 +2151,47 @@ fn default_database_backups_to_keep() -> i16 { 1 }
 
 fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }
 
-fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
+fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }
 
-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) }
 
 fn default_cache_capacity_modifier() -> f64 { 1.0 }
 
 fn default_auth_chain_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
+    parallelism_scaled_u32(50_000).saturating_add(500_000)
 }
 
 fn default_shorteventid_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(100_000)
-}
-
-fn default_eventidshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
-}
-
-fn default_eventid_pdu_cache_capacity() -> u32 {
-    parallelism_scaled_u32(25_000).saturating_add(100_000)
-}
-
-fn default_shortstatekey_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
-}
-
-fn default_statekeyshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(10_000).saturating_add(100_000)
-}
-
-fn default_servernameevent_data_cache_capacity() -> u32 {
     parallelism_scaled_u32(100_000).saturating_add(500_000)
 }
 
-fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
+fn default_eventidshort_cache_capacity() -> u32 {
+    parallelism_scaled_u32(100_000).saturating_add(500_000)
+}
 
-fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
+fn default_eventid_pdu_cache_capacity() -> u32 {
+    parallelism_scaled_u32(50_000).saturating_add(500_000)
+}
 
-fn default_dns_cache_entries() -> u32 { 32768 }
+fn default_shortstatekey_cache_capacity() -> u32 {
+    parallelism_scaled_u32(50_000).saturating_add(500_000)
+}
+
+fn default_statekeyshort_cache_capacity() -> u32 {
+    parallelism_scaled_u32(50_000).saturating_add(500_000)
+}
+
+fn default_servernameevent_data_cache_capacity() -> u32 {
+    parallelism_scaled_u32(200_000).saturating_add(500_000)
+}
+
+fn default_stateinfo_cache_capacity() -> u32 {
+    parallelism_scaled_u32(500).clamp(100, 12000) }
+
+fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
+    parallelism_scaled_u32(500).clamp(100, 12000) }
+
+fn default_dns_cache_entries() -> u32 { 327680 }
 
 fn default_dns_min_ttl() -> u64 { 60 * 180 }
@@ -2384,14 +2384,13 @@ fn default_admin_log_capture() -> String {
 fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }
 
 #[allow(clippy::as_conversions, clippy::cast_precision_loss)]
-fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
+pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
 
-fn parallelism_scaled_u32(val: u32) -> u32 {
-    let val = val.try_into().expect("failed to cast u32 to usize");
-    parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
-}
+pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) }
 
-fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
+pub fn parallelism_scaled_i32(val: i32) -> i32 { val.saturating_mul(sys::available_parallelism() as i32) }
+
+pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
 
 fn default_trusted_server_batch_size() -> usize { 256 }
 
@@ -2411,6 +2410,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }
 
 fn default_stream_amplification() -> usize { 1024 }
 
+fn default_sender_workers() -> usize { 4 }
+
 fn default_client_receive_timeout() -> u64 { 75 }
 
 fn default_client_request_timeout() -> u64 { 180 }
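The parallelism_scaled_* helpers made public above multiply a base value by the detected core count with saturating arithmetic. A std-only approximation (conduwuit's sys::available_parallelism() is assumed to behave like std's):

    use std::thread;

    fn available_parallelism() -> usize {
        thread::available_parallelism().map_or(1, |n| n.get())
    }

    fn parallelism_scaled_u32(val: u32) -> u32 {
        // saturating_mul caps at u32::MAX instead of overflowing
        val.saturating_mul(available_parallelism() as u32)
    }

    fn main() {
        // On an assumed 8-core host the new pdu cache default is
        // 50_000 * 8 + 500_000 = 900_000 entries.
        println!("{}", parallelism_scaled_u32(50_000).saturating_add(500_000));
    }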
diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index 08ca7010..ce687551 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -401,16 +401,10 @@ impl Service {
     fn num_senders(args: &crate::Args<'_>) -> usize {
         const MIN_SENDERS: usize = 1;
 
-        // Limit the number of senders to the number of workers threads or number of
-        // cores, conservatively.
-        let max_senders = args
-            .server
-            .metrics
-            .num_workers()
-            .min(available_parallelism());
+        // Limit the maximum number of senders to the number of cores.
+        let max_senders = available_parallelism();
 
-        // If the user doesn't override the default 0, this is intended to then default
-        // to 1 for now as multiple senders is experimental.
+        // default is 4 senders. clamp between 1 and core count.
         args.server
             .config
             .sender_workers

From 43ce4625b81c16158d698fd9ceba1c8cddcd5af9 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 7 Jun 2025 00:46:55 +0100
Subject: [PATCH 04/25] fix an auth rule not applying correctly

---
 src/core/matrix/state_res/event_auth.rs | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 77a4a95c..ca266c5c 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -256,6 +256,16 @@ where
         },
         | Some(e) => e,
     };
+    // just re-check 1.2 to work around a bug
+    let Some(room_id_server_name) = incoming_event.room_id().server_name() else {
+        warn!("room ID has no servername");
+        return Ok(false);
+    };
+
+    if room_id_server_name != sender.server_name() {
+        warn!("servername of room ID does not match servername of m.room.create sender");
+        return Ok(false);
+    }
 
     if incoming_event.room_id() != room_create_event.room_id() {
         warn!("room_id of incoming event does not match room_id of m.room.create event");
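The guard PATCH 04 adds restates auth rule 1.2 outside the event-auth machinery. Reduced to ruma types (the Option-returning RoomId::server_name() is exactly what the hunk itself relies on):

    use ruma::{RoomId, UserId};

    fn room_and_sender_agree(room_id: &RoomId, sender: &UserId) -> bool {
        match room_id.server_name() {
            // "room ID has no servername" -> reject
            | None => false,
            | Some(origin) => origin == sender.server_name(),
        }
    }

Note that PATCH 07 later corrects the right-hand side of the comparison to the m.room.create sender rather than the incoming event's sender.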
From f857440a561afa2b455dc73b190583ffeed8e457 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Sat, 7 Jun 2025 00:55:03 +0100
Subject: [PATCH 05/25] Note about ruma#2064 in TODO

---
 src/core/matrix/state_res/event_auth.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index ca266c5c..40c32e03 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -149,8 +149,8 @@ where
     for<'a> &'a E: Event + Send,
 {
     debug!(
-        event_id = %incoming_event.event_id(),
-        event_type = ?incoming_event.event_type(),
+        event_id = format!("{}", incoming_event.event_id()),
+        event_type = format!("{}", incoming_event.event_type()),
         "auth_check beginning"
     );
 
@@ -219,7 +219,7 @@ where
     /*
     // TODO: In the past this code was commented as it caused problems with Synapse. This is no
     // longer the case. This needs to be implemented.
-    // See also: https://github.com/ruma/ruma/pull/2064
+    // See also: https://github.com/ruma/ruma/pull/2064
     //
     // 2. Reject if auth_events
     // a. auth_events cannot have duplicate keys since it's a BTree

From 31bc7457d63c4c8dcd0810ad51f4c636558f9c49 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Tue, 10 Jun 2025 22:33:31 +0100
Subject: [PATCH 06/25] Kick up a fuss when m.room.create is unfindable

---
 src/core/matrix/state_res/event_auth.rs | 4 ++--
 src/core/matrix/state_res/mod.rs        | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 40c32e03..31c660ed 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -30,7 +30,7 @@ use super::{
     },
     room_version::RoomVersion,
 };
-use crate::{debug, error, trace, warn};
+use crate::{debug, err_log, error, trace, warn};
 
 // FIXME: field extracting could be bundled for `content`
 #[derive(Deserialize)]
@@ -251,7 +251,7 @@ where
     let room_create_event = match room_create_event {
         | None => {
-            warn!("no m.room.create event in auth chain");
+            error!("no m.room.create event in auth chain for {}!", incoming_event.event_id());
             return Ok(false);
         },
         | Some(e) => e,
diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs
index ce9d9276..e721e14c 100644
--- a/src/core/matrix/state_res/mod.rs
+++ b/src/core/matrix/state_res/mod.rs
@@ -753,7 +753,7 @@ where
             }
         }
     }
-    // Did not find a power level event so we default to zero
+    warn!("could not find a power event in the mainline map, defaulting to zero depth");
 
     Ok(0)
 }
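The debug! rewrite in PATCH 05 swaps tracing's Display sigil for an owned String field. Both forms are valid; the difference is only in how the field value is captured (String as a field value mirrors what the patch itself does):

    use tracing::debug;

    fn log_auth_check(event_id: &str) {
        // `%` records the value via its Display impl, borrowing it.
        debug!(event_id = %event_id, "auth_check beginning");
        // format!() records an owned String instead.
        debug!(event_id = format!("{event_id}"), "auth_check beginning");
    }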
From 38c26181f82d5abd44bc294d496414fd48df416c Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Tue, 10 Jun 2025 23:00:09 +0100
Subject: [PATCH 07/25] Fix room ID check

---
 src/core/matrix/state_res/event_auth.rs               | 11 +++++++----
 src/service/rooms/event_handler/handle_outlier_pdu.rs |  5 +----
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index 31c660ed..de4d20e1 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -30,7 +30,7 @@ use super::{
     },
     room_version::RoomVersion,
 };
-use crate::{debug, err_log, error, trace, warn};
+use crate::{debug, error, trace, warn};
 
 // FIXME: field extracting could be bundled for `content`
 #[derive(Deserialize)]
@@ -251,7 +251,7 @@ where
     let room_create_event = match room_create_event {
         | None => {
-            error!("no m.room.create event in auth chain for {}!", incoming_event.event_id());
+            error!("no m.room.create event found for {}!", incoming_event.event_id());
             return Ok(false);
         },
         | Some(e) => e,
@@ -262,8 +262,11 @@ where
         return Ok(false);
     };
 
-    if room_id_server_name != sender.server_name() {
-        warn!("servername of room ID does not match servername of m.room.create sender");
+    if room_id_server_name != room_create_event.sender().server_name() {
+        warn!(
+            "servername of room ID origin ({}) does not match servername of m.room.create sender ({})",
+            room_id_server_name,
+            room_create_event.sender().server_name());
         return Ok(false);
     }
 
diff --git a/src/service/rooms/event_handler/handle_outlier_pdu.rs b/src/service/rooms/event_handler/handle_outlier_pdu.rs
index d79eed77..fad9ac74 100644
--- a/src/service/rooms/event_handler/handle_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/handle_outlier_pdu.rs
@@ -122,10 +122,7 @@ where
     }
 
     // The original create event must be in the auth events
-    if !matches!(
-        auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
-        Some(_) | None
-    ) {
+    if !auth_events.contains_key(&(StateEventType::RoomCreate, String::new().into())) {
         return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
     }

From 528f63dfbc494cffb20e59e9ac291d250e4c96d4 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:27:25 +0100
Subject: [PATCH 08/25] more logs

---
 src/core/matrix/state_res/event_auth.rs | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index de4d20e1..fc1119de 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -13,6 +13,7 @@ use ruma::{
         power_levels::RoomPowerLevelsEventContent,
         third_party_invite::RoomThirdPartyInviteEventContent,
     },
+    EventId,
     int,
     serde::{Base64, Raw},
 };
@@ -21,7 +22,6 @@
 use serde::{
     de::{Error as _, IgnoredAny},
 };
 use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue};
-
 use super::{
     Error, Event, Result, StateEventType, StateKey, TimelineEventType,
     power_levels::{
@@ -251,7 +251,14 @@ where
     let room_create_event = match room_create_event {
         | None => {
-            error!("no m.room.create event found for {}!", incoming_event.event_id());
+            error!(
+                create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+                power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+                member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
+                "no m.room.create event found for {} ({})!",
+                incoming_event.event_id().as_str(),
+                incoming_event.room_id().as_str()
+            );
             return Ok(false);
         },
         | Some(e) => e,

From 307a0bd627632967d501227db50ad11a8f2518d2 Mon Sep 17 00:00:00 2001
From: nexy7574
Date: Wed, 11 Jun 2025 01:42:19 +0100
Subject: [PATCH 09/25] log which room struggled to get mainline depth

---
 src/core/matrix/state_res/mod.rs | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/core/matrix/state_res/mod.rs b/src/core/matrix/state_res/mod.rs
index e721e14c..ba9c013d 100644
--- a/src/core/matrix/state_res/mod.rs
+++ b/src/core/matrix/state_res/mod.rs
@@ -733,8 +733,12 @@ where
     Fut: Future<Output = Result<Option<E>>> + Send,
     E: Event + Send + Sync,
 {
+    let mut room_id = None;
     while let Some(sort_ev) = event {
         debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+        if room_id.is_none() {
+            room_id = Some(sort_ev.room_id().to_owned());
+        }
         let id = sort_ev.event_id();
 
         if let Some(depth) = mainline_map.get(id) {
@@ -753,7 +757,7 @@ where
             }
         }
     }
-    warn!("could not find a power event in the mainline map, defaulting to zero depth");
+    warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth");
 
     Ok(0)
 }
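The condition rewritten in PATCH 07's handle_outlier_pdu.rs hunk was a tautology: every Option value matches Some(_) | None, so the negated matches! could never reject anything. A minimal demonstration:

    use std::collections::BTreeMap;

    fn main() {
        let auth_events: BTreeMap<&str, &str> = BTreeMap::new();

        // The old check: always true, so its negation never fires.
        assert!(matches!(auth_events.get("m.room.create"), Some(_) | None));

        // The replacement actually detects the missing create event.
        assert!(!auth_events.contains_key("m.room.create"));
    }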
From ed6426ea95c7f9f2e9b38ee7e9532b4b1b72af2c Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:02:49 -0700
Subject: [PATCH 10/25] change rocksdb stats level to 3

scale rocksdb background jobs and subcompactions

change rocksdb default error level to info from error

delete unused num_threads function

fix warns from cargo
---
 conduwuit-example.toml         |  2 +-
 src/core/config/mod.rs         |  6 +++---
 src/database/engine/db_opts.rs | 22 ++++------------------
 3 files changed, 8 insertions(+), 22 deletions(-)

diff --git a/conduwuit-example.toml b/conduwuit-example.toml
index 3bd08ed2..5b66044a 100644
--- a/conduwuit-example.toml
+++ b/conduwuit-example.toml
@@ -1066,7 +1066,7 @@
 # 3 to 5 = Statistics with possible performance impact.
 # 6 = All statistics.
 #
-#rocksdb_stats_level = 1
+#rocksdb_stats_level = 3
 
 # This is a password that can be configured that will let you login to the
 # server bot account (currently `@conduit`) for emergency troubleshooting
diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index 336e6e48..a0d58274 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1235,7 +1235,7 @@ pub struct Config {
     /// 3 to 5 = Statistics with possible performance impact.
     /// 6 = All statistics.
     ///
-    /// default: 1
+    /// default: 3
     #[serde(default = "default_rocksdb_stats_level")]
     pub rocksdb_stats_level: u8,
 
@@ -2297,7 +2297,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 }
 
 fn default_rocksdb_recovery_mode() -> u8 { 1 }
 
-fn default_rocksdb_log_level() -> String { "error".to_owned() }
+fn default_rocksdb_log_level() -> String { "info".to_owned() }
 
 fn default_rocksdb_log_time_to_roll() -> usize { 0 }
 
@@ -2329,7 +2329,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 }
 #[allow(clippy::doc_markdown)]
 fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 }
 
-fn default_rocksdb_stats_level() -> u8 { 1 }
+fn default_rocksdb_stats_level() -> u8 { 3 }
 
 // I know, it's a great name
 #[must_use]
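What the db_opts.rs change (next hunk) evaluates to: one background job and one subcompaction per core, replacing the old num_threads() helper. Illustrative arithmetic only, with std's parallelism query standing in for conduwuit's:

    use std::thread::available_parallelism;

    fn main() {
        let cores = available_parallelism().map_or(1, |n| n.get());
        // parallelism_scaled_i32(1) / parallelism_scaled_u32(1)
        let max_background_jobs = 1i32.saturating_mul(cores as i32);
        let max_subcompactions = 1u32.saturating_mul(cores as u32);
        println!("jobs={max_background_jobs} subcompactions={max_subcompactions}");
    }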
diff --git a/src/database/engine/db_opts.rs b/src/database/engine/db_opts.rs
index 18cec742..1299443d 100644
--- a/src/database/engine/db_opts.rs
+++ b/src/database/engine/db_opts.rs
@@ -1,8 +1,6 @@
-use std::{cmp, convert::TryFrom};
-
-use conduwuit::{Config, Result, utils};
+use conduwuit::{Config, Result};
 use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
-
+use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32};
 use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
 
 /// Create database-wide options suitable for opening the database. This also
@@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
     set_logging_defaults(&mut opts, config);
 
     // Processing
-    opts.set_max_background_jobs(num_threads::<i32>(config)?);
-    opts.set_max_subcompactions(num_threads::<u32>(config)?);
+    opts.set_max_background_jobs(parallelism_scaled_i32(1));
+    opts.set_max_subcompactions(parallelism_scaled_u32(1));
     opts.set_avoid_unnecessary_blocking_io(true);
     opts.set_max_file_opening_threads(0);
 
@@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
         opts.set_callback_logger(rocksdb_log_level, &handle_log);
     }
 }
-
-fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> {
-    const MIN_PARALLELISM: usize = 2;
-
-    let requested = if config.rocksdb_parallelism_threads != 0 {
-        config.rocksdb_parallelism_threads
-    } else {
-        utils::available_parallelism()
-    };
-
-    utils::math::try_into::<T, _>(cmp::max(MIN_PARALLELISM, requested))
-}

From 10dd534defc97145a29c693d7ab940374f4dfaaf Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Wed, 18 Jun 2025 12:48:27 -0700
Subject: [PATCH 11/25] make fetching key room events less smart

---
 src/core/matrix/state_res/event_auth.rs | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/src/core/matrix/state_res/event_auth.rs b/src/core/matrix/state_res/event_auth.rs
index fc1119de..ec70d684 100644
--- a/src/core/matrix/state_res/event_auth.rs
+++ b/src/core/matrix/state_res/event_auth.rs
@@ -242,12 +242,16 @@ where
     }
     */
 
-    let (room_create_event, power_levels_event, sender_member_event) = join3(
-        fetch_state(&StateEventType::RoomCreate, ""),
-        fetch_state(&StateEventType::RoomPowerLevels, ""),
-        fetch_state(&StateEventType::RoomMember, sender.as_str()),
-    )
-    .await;
+    // let (room_create_event, power_levels_event, sender_member_event) = join3(
+    //     fetch_state(&StateEventType::RoomCreate, ""),
+    //     fetch_state(&StateEventType::RoomPowerLevels, ""),
+    //     fetch_state(&StateEventType::RoomMember, sender.as_str()),
+    // )
+    // .await;
+
+    let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
+    let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
+    let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;
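PATCH 11 trades concurrency for simplicity: the three state lookups previously raced via join3 now run one after another. The shape of the change, with a stub in place of the real fetch_state:

    use futures::future::join3;

    async fn fetch(name: &str) -> Option<String> {
        Some(name.to_owned()) // stand-in for a real state lookup
    }

    // Before: all three futures progress concurrently.
    async fn concurrent() -> (Option<String>, Option<String>, Option<String>) {
        join3(fetch("create"), fetch("power_levels"), fetch("member")).await
    }

    // After: strictly sequential awaits, one lookup at a time.
    async fn sequential() -> (Option<String>, Option<String>, Option<String>) {
        (fetch("create").await, fetch("power_levels").await, fetch("member").await)
    }

    fn main() {
        let (a, _, _) = futures::executor::block_on(sequential());
        assert_eq!(a.as_deref(), Some("create"));
    }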
From c628f31ad2b0409bd863285b6157f392f3bc6b77 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:39:10 -0700
Subject: [PATCH 12/25] lock the getter instead ???

c/o M
---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 2 +-
 src/service/rooms/state/mod.rs                         | 1 +
 src/service/rooms/timeline/create.rs                   | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index d2e0623c..c92d7ac7 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -175,7 +175,7 @@ where
     let extremities: Vec<_> = self
         .services
         .state
-        .get_forward_extremities(room_id)
+        .get_forward_extremities(room_id, &state_lock)
         .map(ToOwned::to_owned)
         .ready_filter(|event_id| {
             // Remove any that are referenced by this incoming event's prev_events
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 641aa6a9..92881126 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -388,6 +388,7 @@ impl Service {
     pub fn get_forward_extremities<'a>(
         &'a self,
         room_id: &'a RoomId,
+        _state_lock: &'a RoomMutexGuard,
     ) -> impl Stream<Item = &EventId> + Send + '_ {
         let prefix = (room_id, Interfix);
 
diff --git a/src/service/rooms/timeline/create.rs b/src/service/rooms/timeline/create.rs
index 6732cd8e..188e71a4 100644
--- a/src/service/rooms/timeline/create.rs
+++ b/src/service/rooms/timeline/create.rs
@@ -42,7 +42,7 @@ pub async fn create_hash_and_sign_event(
     let prev_events: Vec<OwnedEventId> = self
         .services
         .state
-        .get_forward_extremities(room_id)
+        .get_forward_extremities(room_id, _mutex_lock)
         .take(20)
         .map(Into::into)
         .collect()

From 0f78f77991385a8cd59888fa99472cfbadaa549a Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 3 Jul 2025 14:44:27 -0700
Subject: [PATCH 13/25] vehicle loan documentation now available at window 7

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index c92d7ac7..717fe711 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -6,6 +6,7 @@ use conduwuit::{
     trace,
     utils::stream::{BroadbandExt, ReadyExt},
     warn,
+    info
 };
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@@ -193,6 +194,8 @@ where
         .collect()
         .await;
 
+    if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
+
     debug!(
         "Retained {} extremities checked against {} prev_events",
         extremities.len(),
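The _state_lock parameter added in PATCH 12 is a proof-of-lock argument: the guard is never used, but callers cannot reach the getter without holding the room mutex. The same pattern with std types (RoomMutexGuard simplified to a plain MutexGuard):

    use std::sync::{Mutex, MutexGuard};

    struct RoomState {
        forward_extremities: Vec<String>,
    }

    fn get_forward_extremities<'a>(
        state: &'a RoomState,
        _room_lock: &MutexGuard<'_, ()>, // proof the caller holds the lock
    ) -> impl Iterator<Item = &'a str> {
        state.forward_extremities.iter().map(String::as_str)
    }

    fn main() {
        let lock = Mutex::new(());
        let state = RoomState { forward_extremities: vec!["$event".into()] };
        let guard = lock.lock().unwrap();
        assert_eq!(get_forward_extremities(&state, &guard).count(), 1);
    }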
From 4308db5e305799b3cf58b91c2a69f50064edda09 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 21 Jun 2025 08:13:30 -0700
Subject: [PATCH 14/25] sender_workers scaling. this time, with feeling!

---
 src/core/config/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index a0d58274..d8dd5fcf 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -1917,9 +1917,9 @@ pub struct Config {
     pub stream_amplification: usize,
 
     /// Number of sender task workers; determines sender parallelism. Default is
-    /// '4'. Override by setting a different value. Values clamped 1 to core count.
+    /// core count. Override by setting a different value.
     ///
-    /// default: 4
+    /// default: core count
     #[serde(default = "default_sender_workers")]
     pub sender_workers: usize,
 
@@ -2410,7 +2410,7 @@ fn default_stream_width_scale() -> f32 { 1.0 }
 
 fn default_stream_amplification() -> usize { 1024 }
 
-fn default_sender_workers() -> usize { 4 }
+fn default_sender_workers() -> usize { parallelism_scaled(1) }
 
 fn default_client_receive_timeout() -> u64 { 75 }

From 76b9b88eceee189d66ff2466a68ce5447ef2d558 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 30 Jun 2025 15:25:11 -0700
Subject: [PATCH 15/25] more funny settings (part 3 of 12)

---
 src/core/config/mod.rs | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/src/core/config/mod.rs b/src/core/config/mod.rs
index d8dd5fcf..6cbda97e 100644
--- a/src/core/config/mod.rs
+++ b/src/core/config/mod.rs
@@ -2153,40 +2153,41 @@ fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(
 
 fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }
 
-fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(500_000) }
+fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) }
 
 fn default_cache_capacity_modifier() -> f64 { 1.0 }
 
 fn default_auth_chain_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(500_000)
+    parallelism_scaled_u32(50_000).saturating_add(100_000)
 }
 
 fn default_shorteventid_cache_capacity() -> u32 {
-    parallelism_scaled_u32(100_000).saturating_add(500_000)
+    parallelism_scaled_u32(100_000).saturating_add(100_000)
 }
 
 fn default_eventidshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(100_000).saturating_add(500_000)
+    parallelism_scaled_u32(50_000).saturating_add(100_000)
 }
 
 fn default_eventid_pdu_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(500_000)
+    parallelism_scaled_u32(50_000).saturating_add(100_000)
 }
 
 fn default_shortstatekey_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(500_000)
+    parallelism_scaled_u32(50_000).saturating_add(100_000)
 }
 
 fn default_statekeyshort_cache_capacity() -> u32 {
-    parallelism_scaled_u32(50_000).saturating_add(500_000)
+    parallelism_scaled_u32(50_000).saturating_add(100_000)
 }
 
 fn default_servernameevent_data_cache_capacity() -> u32 {
-    parallelism_scaled_u32(200_000).saturating_add(500_000)
+    parallelism_scaled_u32(100_000).saturating_add(100_000)
 }
 
 fn default_stateinfo_cache_capacity() -> u32 {
-    parallelism_scaled_u32(500).clamp(100, 12000) }
+    parallelism_scaled_u32(500).clamp(100, 12000)
+}
 
 fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
     parallelism_scaled_u32(500).clamp(100, 12000) }
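Net effect of PATCHes 14 and 15 on the defaults, as plain arithmetic on an assumed 8-core host (illustration only; the real values come from the helpers in src/core/config/mod.rs):

    fn main() {
        let cores: u32 = 8;
        // default_pdu_cache_capacity: parallelism_scaled_u32(50_000) + 100_000
        let pdu_cache = 50_000u32.saturating_mul(cores).saturating_add(100_000);
        // default_stateinfo_cache_capacity: parallelism_scaled_u32(500).clamp(100, 12000)
        let stateinfo = 500u32.saturating_mul(cores).clamp(100, 12_000);
        // default_sender_workers: parallelism_scaled(1) == core count
        assert_eq!((pdu_cache, stateinfo, cores), (500_000, 4_000, 8));
    }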
From d5039bd72f7c4b9168c1c4647df087f193524349 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 21 Jul 2025 17:22:19 -0700
Subject: [PATCH 16/25] fix too many infos

---
 src/service/rooms/event_handler/upgrade_outlier_pdu.rs | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
index 717fe711..1b6b8af5 100644
--- a/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
+++ b/src/service/rooms/event_handler/upgrade_outlier_pdu.rs
@@ -5,8 +5,7 @@ use conduwuit::{
     matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
     trace,
     utils::stream::{BroadbandExt, ReadyExt},
-    warn,
-    info
+    warn
 };
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};

From c4f6d1ed09e38c773b8c9bf2ad1094b9ec18065f Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Sat, 2 Aug 2025 20:59:02 -0700
Subject: [PATCH 17/25] exponential backoff is now just bees. did you want bees? no? well you have them now. congrats

---
 src/service/presence/mod.rs                                  | 2 +-
 src/service/resolver/dns.rs                                  | 4 ++--
 src/service/rooms/event_handler/fetch_and_handle_outliers.rs | 2 +-
 src/service/rooms/event_handler/handle_prev_pdu.rs           | 2 +-
 4 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/service/presence/mod.rs b/src/service/presence/mod.rs
index 8f646be6..876d8bee 100644
--- a/src/service/presence/mod.rs
+++ b/src/service/presence/mod.rs
@@ -100,7 +100,7 @@ impl Service {
     /// Pings the presence of the given user in the given room, setting the
     /// specified state.
     pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> {
-        const REFRESH_TIMEOUT: u64 = 60 * 1000;
+        const REFRESH_TIMEOUT: u64 = 60 * 1000 * 4;
 
         let last_presence = self.db.get_presence(user_id).await;
         let state_changed = match last_presence {
diff --git a/src/service/resolver/dns.rs b/src/service/resolver/dns.rs
index 3a0b2551..6856853e 100644
--- a/src/service/resolver/dns.rs
+++ b/src/service/resolver/dns.rs
@@ -53,9 +53,9 @@ impl Resolver {
         opts.cache_size = config.dns_cache_entries as usize;
         opts.preserve_intermediates = true;
         opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain));
-        opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30));
+        opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24));
         opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl));
-        opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 7));
+        opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24));
         opts.timeout = Duration::from_secs(config.dns_timeout);
         opts.attempts = config.dns_attempts as usize;
         opts.try_tcp_on_error = config.dns_tcp_fallback;
diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
index 59b768f2..c72cce87 100644
--- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
+++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
@@ -79,7 +79,7 @@ where
                 {
                     // Exponential backoff
                     const MIN_DURATION: u64 = 60 * 2;
-                    const MAX_DURATION: u64 = 60 * 60 * 8;
+                    const MAX_DURATION: u64 = 60 * 60;
                     if continue_exponential_backoff_secs(
                         MIN_DURATION,
                         MAX_DURATION,
diff --git a/src/service/rooms/event_handler/handle_prev_pdu.rs b/src/service/rooms/event_handler/handle_prev_pdu.rs
index cb4978d9..c786599a 100644
--- a/src/service/rooms/event_handler/handle_prev_pdu.rs
+++ b/src/service/rooms/event_handler/handle_prev_pdu.rs
@@ -46,7 +46,7 @@ where
     {
         // Exponential backoff
         const MIN_DURATION: u64 = 5 * 60;
-        const MAX_DURATION: u64 = 60 * 60 * 24;
+        const MAX_DURATION: u64 = 60 * 60;
         if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) {
             debug!(
                 ?tries,
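PATCH 17 narrows every backoff window's cap to one hour. Assuming continue_exponential_backoff_secs behaves like a doubling window capped at max (a sketch, not conduwuit's exact implementation):

    use std::time::Duration;

    fn continue_backoff(min: u64, max: u64, elapsed: Duration, tries: u32) -> bool {
        let window = min.saturating_mul(2u64.saturating_pow(tries)).min(max);
        elapsed.as_secs() < window
    }

    fn main() {
        // Inside the window: still backing off.
        assert!(continue_backoff(120, 3600, Duration::from_secs(600), 4));
        // With the cap at one hour instead of eight (or twenty-four),
        // a repeatedly failing event becomes retryable much sooner.
        assert!(!continue_backoff(120, 3600, Duration::from_secs(4000), 10));
    }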
From f8b86e459800bba3858de35f97398a201e013234 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Thu, 21 Aug 2025 12:19:53 -0700
Subject: [PATCH 18/25] fix warn by removing unused debug imports

---
 src/service/federation/execute.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs
index ace29c1c..9a3cd828 100644
--- a/src/service/federation/execute.rs
+++ b/src/service/federation/execute.rs
@@ -6,7 +6,7 @@ use std::{
 
 use bytes::Bytes;
 use conduwuit::{
-    Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err,
+    Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err,
     error::inspect_debug_log, implement, trace, utils::string::EMPTY, warn,
 };
 use http::{HeaderValue, header::AUTHORIZATION};

From 0e6b7f6d5f7eb2c739eabfbfdb5d4540b9fdef5a Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Fri, 22 Aug 2025 10:44:31 -0700
Subject: [PATCH 19/25] log which room is being backfilled

---
 src/service/rooms/timeline/backfill.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs
index e976981e..a885f2a3 100644
--- a/src/service/rooms/timeline/backfill.rs
+++ b/src/service/rooms/timeline/backfill.rs
@@ -100,7 +100,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
         .boxed();
 
     while let Some(ref backfill_server) = servers.next().await {
-        info!("Asking {backfill_server} for backfill");
+        info!("Asking {backfill_server} for backfill of room {room_id}");
         let response = self
             .services
             .sending

From a0bde96494e81b2a19e65656273bd2e897780dd4 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Fri, 22 Aug 2025 23:05:20 -0700
Subject: [PATCH 20/25] move more backfill log to info, clean up imports

---
 src/service/rooms/timeline/backfill.rs | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

diff --git a/src/service/rooms/timeline/backfill.rs b/src/service/rooms/timeline/backfill.rs
index a885f2a3..d91075e1 100644
--- a/src/service/rooms/timeline/backfill.rs
+++ b/src/service/rooms/timeline/backfill.rs
@@ -1,13 +1,13 @@
 use std::iter::once;
 
 use conduwuit_core::{
-    Result, debug, debug_warn, implement, info,
+    Result, debug, implement, info,
     matrix::{
         event::Event,
         pdu::{PduCount, PduId, RawPduId},
     },
     utils::{IterStream, ReadyExt},
-    validated, warn,
+    validated,
 };
 use futures::{FutureExt, StreamExt};
 use ruma::{
     RoomId, ServerName,
     api::federation,
     events::{
         StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
-    },
-    uint,
-};
+    }, UInt};
 use serde_json::value::RawValue as RawJsonValue;
 
 use super::ExtractBody;
@@ -109,7 +107,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
             federation::backfill::get_backfill::v1::Request {
                 room_id: room_id.to_owned(),
                 v: vec![first_pdu.1.event_id().to_owned()],
-                limit: uint!(100),
+                limit: UInt::from(self.services.server.config.max_fetch_prev_events),
             },
         )
         .await;
@@ -117,13 +115,13 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
         | Ok(response) => {
             for pdu in response.pdus {
                 if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await {
-                    debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}");
+                    info!("Failed to add backfilled pdu in room {room_id}: {e}");
                 }
             }
             return Ok(());
         },
         | Err(e) => {
-            warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
+            info!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
         },
     }
 }
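PATCH 20's limit change swaps a literal for a configured value; ruma's UInt converts losslessly from u32-sized inputs, so no uint!() macro is needed:

    use ruma::UInt;

    fn main() {
        let max_fetch_prev_events: u32 = 192; // hypothetical config value
        let limit = UInt::from(max_fetch_prev_events);
        assert_eq!(u64::from(limit), 192);
    }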
From e2995df505bc458f303c7616f76e1d29240db26e Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 25 Aug 2025 20:51:55 -0700
Subject: [PATCH 21/25] reduce log volume (keeps 2 infos)

---
 src/api/server/send.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/api/server/send.rs b/src/api/server/send.rs
index e7e4e9fd..9e3895e4 100644
--- a/src/api/server/send.rs
+++ b/src/api/server/send.rs
@@ -79,7 +79,7 @@ pub(crate) async fn send_transaction_message_route(
     }
 
     let txn_start_time = Instant::now();
-    info!(
+    trace!(
         pdus = body.pdus.len(),
         edus = body.edus.len(),
         id = ?body.transaction_id,
@@ -102,7 +102,7 @@ pub(crate) async fn send_transaction_message_route(
         .filter_map(Result::ok)
         .stream();
 
-    info!(
+    trace!(
         pdus = body.pdus.len(),
         edus = body.edus.len(),
         elapsed = ?txn_start_time.elapsed(),
@@ -198,7 +198,7 @@ async fn handle_room(
         .and_then(|(_, event_id, value)| async move {
             services.server.check_running()?;
             let pdu_start_time = Instant::now();
-            info!(
+            trace!(
                 %room_id,
                 %event_id,
                 pdu = n + 1,

From 423ca4e664d601d63774bc83955c8b4443f1cb19 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 25 Aug 2025 21:25:42 -0700
Subject: [PATCH 22/25] adjust log volume

---
 src/service/sending/sender.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs
index 5498afc0..3c3e126e 100644
--- a/src/service/sending/sender.rs
+++ b/src/service/sending/sender.rs
@@ -882,7 +882,7 @@ impl Service {
             .execute_on(&self.services.client.sender, &server, request)
             .await
             .inspect(|_| {
-                info!(%txn_id, %server, "Sent {} PDUs, {} EDUs", pdu_count, edu_count);
+                trace!(%txn_id, %server, "Sent {} PDUs, {} EDUs", pdu_count, edu_count);
             })
             .inspect_err(|e| {
                 info!(%txn_id, %server, "Failed to send transaction ({} PDUs, {} EDUs): {e:?}", pdu_count, edu_count);
@@ -890,7 +890,7 @@ impl Service {
 
         for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) {
             if let Err(e) = result {
-                warn!(
+                info!(
                     %txn_id, %server,
                     "error sending PDU {event_id} to remote server: {e:?}"
                 );

From 91fed80d4ffbe07492b9db47cb5e43b856c7a216 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 25 Aug 2025 21:26:09 -0700
Subject: [PATCH 23/25] delete a bad event rate limiter (bad, do not use)

---
 .../fetch_and_handle_outliers.rs | 24 ------------------
 1 file changed, 24 deletions(-)

diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
index c72cce87..e961e342 100644
--- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
+++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
@@ -70,30 +70,6 @@ where
         let mut events_all = HashSet::with_capacity(todo_auth_events.len());
         while let Some(next_id) = todo_auth_events.pop_front() {
-            if let Some((time, tries)) = self
-                .services
-                .globals
-                .bad_event_ratelimiter
-                .read()
-                .get(&*next_id)
-            {
-                // Exponential backoff
-                const MIN_DURATION: u64 = 60 * 2;
-                const MAX_DURATION: u64 = 60 * 60;
-                if continue_exponential_backoff_secs(
-                    MIN_DURATION,
-                    MAX_DURATION,
-                    time.elapsed(),
-                    *tries,
-                ) {
-                    debug_warn!(
-                        tried = ?*tries,
-                        elapsed = ?time.elapsed(),
-                        "Backing off from {next_id}",
-                    );
-                    continue;
-                }
-            }
 
             if events_all.contains(&next_id) {
                 continue;
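PATCHes 21 through 24 all route the same events to quieter levels. Under tracing's default info filter only the first statement below is emitted; the sketch assumes nothing beyond the tracing macros the diffs already use:

    use tracing::{info, trace};

    fn log_send_outcome(txn_id: &str, pdus: usize) {
        // kept at info: operator-visible failure
        info!(%txn_id, "Failed to send transaction ({pdus} PDUs)");
        // demoted to trace: per-transaction success chatter
        trace!(%txn_id, "Sent {pdus} PDUs");
    }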
From 845a64b73dd9f9be2e2ef62da12e2c88d4ddd23d Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 25 Aug 2025 22:12:53 -0700
Subject: [PATCH 24/25] demote a bunch of logs

---
 src/service/federation/execute.rs              | 13 +++++--------
 .../event_handler/fetch_and_handle_outliers.rs |  2 +-
 src/service/sending/sender.rs                  |  2 +-
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs
index 9a3cd828..461467da 100644
--- a/src/service/federation/execute.rs
+++ b/src/service/federation/execute.rs
@@ -5,10 +5,7 @@ use std::{
 };
 
 use bytes::Bytes;
-use conduwuit::{
-    Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err,
-    error::inspect_debug_log, implement, trace, utils::string::EMPTY, warn,
-};
+use conduwuit::{Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, warn, info};
 use http::{HeaderValue, header::AUTHORIZATION};
 use ipaddress::IPAddress;
 use reqwest::{Client, Method, Request, Response, Url};
@@ -197,9 +194,9 @@ fn handle_error(
 ) -> Result<Response> {
     if e.is_timeout() || e.is_connect() {
         e = e.without_url();
-        warn!(?url, "network error while sending federation request: {e:?}");
+        trace!(?url, "network error while sending federation request: {e:?}");
     } else if e.is_redirect() {
-        warn!(
+        trace!(
             method = ?method,
             url = ?url,
             final_url = ?e.url(),
@@ -208,7 +205,7 @@ fn handle_error(
             e,
         );
     } else {
-        warn!(?url, "failed to send federation request: {e:?}");
+        trace!(?url, "failed to send federation request: {e:?}");
     }
 
     let mut nice_error = "Request failed".to_owned();
@@ -217,7 +214,7 @@ fn handle_error(
         write!(nice_error, ": {source:?}").expect("writing to string should not fail");
         src = source.source();
     }
-    warn!(nice_error, "Federation request error");
+    info!(nice_error, "Federation request error");
 
     Err(e.into())
 }
diff --git a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
index e961e342..4fe4ad0a 100644
--- a/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
+++ b/src/service/rooms/event_handler/fetch_and_handle_outliers.rs
@@ -4,7 +4,7 @@ use std::{
 };
 
 use conduwuit::{
-    Event, PduEvent, debug, debug_error, debug_warn, implement,
+    Event, PduEvent, debug, debug_error, implement,
     matrix::event::gen_event_id_canonical_json, trace,
     utils::continue_exponential_backoff_secs, warn,
 };
diff --git a/src/service/sending/sender.rs b/src/service/sending/sender.rs
index 3c3e126e..ac82669f 100644
--- a/src/service/sending/sender.rs
+++ b/src/service/sending/sender.rs
@@ -890,7 +890,7 @@ impl Service {
 
         for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) {
             if let Err(e) = result {
-                info!(
+                trace!(
                     %txn_id, %server,
                     "error sending PDU {event_id} to remote server: {e:?}"
                 );
From 6e7a3cf6cb4d3d85d2ea681599085fc001f945d3 Mon Sep 17 00:00:00 2001
From: Jacob Taylor
Date: Mon, 25 Aug 2025 23:26:28 -0700
Subject: [PATCH 25/25] delete more imports to quiet cargo

---
 src/api/server/send.rs            | 2 +-
 src/service/federation/execute.rs | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/api/server/send.rs b/src/api/server/send.rs
index 9e3895e4..601da06c 100644
--- a/src/api/server/send.rs
+++ b/src/api/server/send.rs
@@ -3,7 +3,7 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant};
 use axum::extract::State;
 use axum_client_ip::InsecureClientIp;
 use conduwuit::{
-    Err, Error, Result, debug,
+    Err, Error, Result,
     debug::INFO_SPAN_LEVEL,
     debug_warn, err, error, info,
     result::LogErr,
diff --git a/src/service/federation/execute.rs b/src/service/federation/execute.rs
index 461467da..5fe10ec9 100644
--- a/src/service/federation/execute.rs
+++ b/src/service/federation/execute.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 use bytes::Bytes;
-use conduwuit::{Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, warn, info};
+use conduwuit::{Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, info};
 use http::{HeaderValue, header::AUTHORIZATION};
 use ipaddress::IPAddress;
 use reqwest::{Client, Method, Request, Response, Url};