Compare commits

...

25 commits

Author SHA1 Message Date
Jacob Taylor
2106e4e9f4 add event_id to log entry 2025-08-26 18:10:08 -07:00
Jacob Taylor
8c8c023120 delete more imports to quiet cargo 2025-08-26 18:10:08 -07:00
Jacob Taylor
5f2a24d7b2 demote a bunch of logs 2025-08-26 18:10:08 -07:00
Jacob Taylor
1934711276 delete a bad event rate limiter (bad, do not use) 2025-08-26 18:10:08 -07:00
Jacob Taylor
1807643371 adjust log volume 2025-08-26 18:10:08 -07:00
Jacob Taylor
6d6593c5eb reduce log volume (keeps 2 infos) 2025-08-26 18:10:08 -07:00
Jacob Taylor
63ee2dacea move more backfill log to info, clean up imports 2025-08-26 18:10:08 -07:00
Jacob Taylor
b8ead96d3e log which room is being backfilled 2025-08-26 18:10:08 -07:00
Jacob Taylor
7d9b514696 fix warn by removing unused debug imports 2025-08-26 18:10:08 -07:00
Jacob Taylor
4baf48e214 exponential backoff is now just bees. did you want bees? no? well you have them now. congrats 2025-08-26 18:10:08 -07:00
Jacob Taylor
6483e984ce fix too many infos 2025-08-26 18:10:08 -07:00
Jacob Taylor
697e7aa2cd more funny settings (part 3 of 12) 2025-08-26 18:10:08 -07:00
Jacob Taylor
081487a413 sender_workers scaling. this time, with feeling! 2025-08-26 18:10:08 -07:00
Jacob Taylor
3e9cf3f494 vehicle loan documentation now available at window 7 2025-08-26 18:10:08 -07:00
Jacob Taylor
1e4cf59ab8 lock the getter instead ??? c/o M 2025-08-26 18:10:08 -07:00
Jacob Taylor
008d90b118 make fetching key room events less smart 2025-08-26 18:10:07 -07:00
Jacob Taylor
54eab4775a change rocksdb stats level to 3
scale rocksdb background jobs and subcompactions

change rocksdb default error level to info from error

delete unused num_threads function

fix warns from cargo
2025-08-26 18:10:07 -07:00
nexy7574
0a74dfe5a5 log which room struggled to get mainline depth 2025-08-26 18:10:07 -07:00
nexy7574
ecdce68ae3 more logs 2025-08-26 18:10:07 -07:00
nexy7574
43574118aa Fix room ID check 2025-08-26 18:10:07 -07:00
nexy7574
e7399409b4 Kick up a fuss when m.room.create is unfindable 2025-08-26 18:10:07 -07:00
nexy7574
1d97861332 Note about ruma#2064 in TODO 2025-08-26 18:10:07 -07:00
nexy7574
e8bba3ba37 fix an auth rule not applying correctly 2025-08-26 18:10:07 -07:00
Jacob Taylor
a57df9af37 upgrade some settings to enable 5g in continuwuity
enable converged 6g at the edge in continuwuity

better stateinfo_cache_capacity default

better roomid_spacehierarchy_cache_capacity

make sender workers default better and clamp value to core count

update sender workers documentation

add more parallelism_scaled and make them public

update 1 document
2025-08-26 18:10:07 -07:00
Jacob Taylor
45301d4e41 bump the number of allowed immutable memtables by 1, to allow for greater flood protection
this should probably not be applied if you have rocksdb_atomic_flush = false (the default)
2025-08-26 18:10:07 -07:00
19 changed files with 112 additions and 133 deletions

View file

@ -1066,7 +1066,7 @@
# 3 to 5 = Statistics with possible performance impact.
# 6 = All statistics.
#
#rocksdb_stats_level = 1
#rocksdb_stats_level = 3
# This is a password that can be configured that will let you login to the
# server bot account (currently `@conduit`) for emergency troubleshooting
@ -1680,11 +1680,9 @@
#stream_amplification = 1024
# Number of sender task workers; determines sender parallelism. Default is
# '0' which means the value is determined internally, likely matching the
# number of tokio worker-threads or number of cores, etc. Override by
# setting a non-zero value.
# number of CPU cores. Override by setting a different value.
#
#sender_workers = 0
#sender_workers = 4
# Enables listener sockets; can be set to false to disable listening. This
# option is intended for developer/diagnostic purposes only.

View file

@ -3,7 +3,7 @@ use std::{collections::BTreeMap, net::IpAddr, time::Instant};
use axum::extract::State;
use axum_client_ip::InsecureClientIp;
use conduwuit::{
Err, Error, Result, debug,
Err, Error, Result,
debug::INFO_SPAN_LEVEL,
debug_warn, err, error, info,
result::LogErr,
@ -79,7 +79,7 @@ pub(crate) async fn send_transaction_message_route(
}
let txn_start_time = Instant::now();
info!(
trace!(
pdus = body.pdus.len(),
edus = body.edus.len(),
id = ?body.transaction_id,
@ -102,7 +102,7 @@ pub(crate) async fn send_transaction_message_route(
.filter_map(Result::ok)
.stream();
info!(
trace!(
pdus = body.pdus.len(),
edus = body.edus.len(),
elapsed = ?txn_start_time.elapsed(),
@ -198,7 +198,7 @@ async fn handle_room(
.and_then(|(_, event_id, value)| async move {
services.server.check_running()?;
let pdu_start_time = Instant::now();
info!(
trace!(
%room_id,
%event_id,
pdu = n + 1,

View file

@ -1235,7 +1235,7 @@ pub struct Config {
/// 3 to 5 = Statistics with possible performance impact.
/// 6 = All statistics.
///
/// default: 1
/// default: 3
#[serde(default = "default_rocksdb_stats_level")]
pub rocksdb_stats_level: u8,
@ -1917,12 +1917,10 @@ pub struct Config {
pub stream_amplification: usize,
/// Number of sender task workers; determines sender parallelism. Default is
/// '0' which means the value is determined internally, likely matching the
/// number of tokio worker-threads or number of cores, etc. Override by
/// setting a non-zero value.
/// core count. Override by setting a different value.
///
/// default: 0
#[serde(default)]
/// default: core count
#[serde(default = "default_sender_workers")]
pub sender_workers: usize,
/// Enables listener sockets; can be set to false to disable listening. This
@ -2153,45 +2151,48 @@ fn default_database_backups_to_keep() -> i16 { 1 }
fn default_db_write_buffer_capacity_mb() -> f64 { 48.0 + parallelism_scaled_f64(4.0) }
fn default_db_cache_capacity_mb() -> f64 { 128.0 + parallelism_scaled_f64(64.0) }
fn default_db_cache_capacity_mb() -> f64 { 512.0 + parallelism_scaled_f64(512.0) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(10_000).saturating_add(100_000) }
fn default_pdu_cache_capacity() -> u32 { parallelism_scaled_u32(50_000).saturating_add(100_000) }
fn default_cache_capacity_modifier() -> f64 { 1.0 }
fn default_auth_chain_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
}
fn default_shorteventid_cache_capacity() -> u32 {
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_shorteventid_cache_capacity() -> u32 {
parallelism_scaled_u32(100_000).saturating_add(100_000)
}
fn default_eventidshort_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_eventid_pdu_cache_capacity() -> u32 {
parallelism_scaled_u32(25_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_shortstatekey_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_statekeyshort_cache_capacity() -> u32 {
parallelism_scaled_u32(10_000).saturating_add(100_000)
parallelism_scaled_u32(50_000).saturating_add(100_000)
}
fn default_servernameevent_data_cache_capacity() -> u32 {
parallelism_scaled_u32(100_000).saturating_add(500_000)
parallelism_scaled_u32(100_000).saturating_add(100_000)
}
fn default_stateinfo_cache_capacity() -> u32 { parallelism_scaled_u32(100) }
fn default_stateinfo_cache_capacity() -> u32 {
parallelism_scaled_u32(500).clamp(100, 12000)
}
fn default_roomid_spacehierarchy_cache_capacity() -> u32 { parallelism_scaled_u32(1000) }
fn default_roomid_spacehierarchy_cache_capacity() -> u32 {
parallelism_scaled_u32(500).clamp(100, 12000) }
fn default_dns_cache_entries() -> u32 { 32768 }
fn default_dns_cache_entries() -> u32 { 327680 }
fn default_dns_min_ttl() -> u64 { 60 * 180 }
@ -2297,7 +2298,7 @@ fn default_typing_client_timeout_max_s() -> u64 { 45 }
fn default_rocksdb_recovery_mode() -> u8 { 1 }
fn default_rocksdb_log_level() -> String { "error".to_owned() }
fn default_rocksdb_log_level() -> String { "info".to_owned() }
fn default_rocksdb_log_time_to_roll() -> usize { 0 }
@ -2329,7 +2330,7 @@ fn default_rocksdb_compression_level() -> i32 { 32767 }
#[allow(clippy::doc_markdown)]
fn default_rocksdb_bottommost_compression_level() -> i32 { 32767 }
fn default_rocksdb_stats_level() -> u8 { 1 }
fn default_rocksdb_stats_level() -> u8 { 3 }
// I know, it's a great name
#[must_use]
@ -2384,14 +2385,13 @@ fn default_admin_log_capture() -> String {
fn default_admin_room_tag() -> String { "m.server_notice".to_owned() }
#[allow(clippy::as_conversions, clippy::cast_precision_loss)]
fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
pub fn parallelism_scaled_f64(val: f64) -> f64 { val * (sys::available_parallelism() as f64) }
fn parallelism_scaled_u32(val: u32) -> u32 {
let val = val.try_into().expect("failed to cast u32 to usize");
parallelism_scaled(val).try_into().unwrap_or(u32::MAX)
}
pub fn parallelism_scaled_u32(val: u32) -> u32 { val.saturating_mul(sys::available_parallelism() as u32) }
fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
pub fn parallelism_scaled_i32(val: i32) -> i32 { val.saturating_mul(sys::available_parallelism() as i32) }
pub fn parallelism_scaled(val: usize) -> usize { val.saturating_mul(sys::available_parallelism()) }
fn default_trusted_server_batch_size() -> usize { 256 }
@ -2411,6 +2411,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }
fn default_stream_amplification() -> usize { 1024 }
fn default_sender_workers() -> usize { parallelism_scaled(1) }
fn default_client_receive_timeout() -> u64 { 75 }
fn default_client_request_timeout() -> u64 { 180 }
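As a worked example of the parallelism scaling used by these defaults (a sketch, not the conduwuit source; conduwuit's sys::available_parallelism() is assumed to wrap the std call):

use std::thread;

// Sketch: how a parallelism-scaled default such as default_pdu_cache_capacity()
// works out on a given host.
fn parallelism_scaled_u32(val: u32) -> u32 {
    let cores = thread::available_parallelism().map_or(1, |n| n.get()) as u32;
    val.saturating_mul(cores)
}

fn default_pdu_cache_capacity() -> u32 {
    parallelism_scaled_u32(50_000).saturating_add(100_000)
}

fn main() {
    // On a 16-core host this prints 900000 (50_000 * 16 + 100_000).
    println!("{}", default_pdu_cache_capacity());
}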

View file

@ -13,6 +13,7 @@ use ruma::{
power_levels::RoomPowerLevelsEventContent,
third_party_invite::RoomThirdPartyInviteEventContent,
},
EventId,
int,
serde::{Base64, Raw},
};
@ -21,7 +22,6 @@ use serde::{
de::{Error as _, IgnoredAny},
};
use serde_json::{from_str as from_json_str, value::RawValue as RawJsonValue};
use super::{
Error, Event, Result, StateEventType, StateKey, TimelineEventType,
power_levels::{
@ -149,8 +149,8 @@ where
for<'a> &'a E: Event + Send,
{
debug!(
event_id = %incoming_event.event_id(),
event_type = ?incoming_event.event_type(),
event_id = format!("{}", incoming_event.event_id()),
event_type = format!("{}", incoming_event.event_type()),
"auth_check beginning"
);
@ -219,7 +219,7 @@ where
/*
// TODO: In the past this code was commented as it caused problems with Synapse. This is no
// longer the case. This needs to be implemented.
// See also: https://github.com/ruma/ruma/pull/2064
// See also: https://github.com/ruma/ruma/pull/2064
//
// 2. Reject if auth_events
// a. auth_events cannot have duplicate keys since it's a BTree
@ -242,20 +242,44 @@ where
}
*/
let (room_create_event, power_levels_event, sender_member_event) = join3(
fetch_state(&StateEventType::RoomCreate, ""),
fetch_state(&StateEventType::RoomPowerLevels, ""),
fetch_state(&StateEventType::RoomMember, sender.as_str()),
)
.await;
// let (room_create_event, power_levels_event, sender_member_event) = join3(
// fetch_state(&StateEventType::RoomCreate, ""),
// fetch_state(&StateEventType::RoomPowerLevels, ""),
// fetch_state(&StateEventType::RoomMember, sender.as_str()),
// )
// .await;
let room_create_event = fetch_state(&StateEventType::RoomCreate, "").await;
let power_levels_event = fetch_state(&StateEventType::RoomPowerLevels, "").await;
let sender_member_event = fetch_state(&StateEventType::RoomMember, sender.as_str()).await;
let room_create_event = match room_create_event {
| None => {
warn!("no m.room.create event in auth chain");
error!(
create_event = room_create_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
power_levels = power_levels_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
member_event = sender_member_event.as_ref().map(Event::event_id).unwrap_or(<&EventId>::try_from("$unknown").unwrap()).as_str(),
"no m.room.create event found for {} ({})!",
incoming_event.event_id().as_str(),
incoming_event.room_id().as_str()
);
return Ok(false);
},
| Some(e) => e,
};
// just re-check 1.2 to work around a bug
let Some(room_id_server_name) = incoming_event.room_id().server_name() else {
warn!("room ID has no servername");
return Ok(false);
};
if room_id_server_name != room_create_event.sender().server_name() {
warn!(
"servername of room ID origin ({}) does not match servername of m.room.create sender ({})",
room_id_server_name,
room_create_event.sender().server_name());
return Ok(false);
}
if incoming_event.room_id() != room_create_event.room_id() {
warn!("room_id of incoming event does not match room_id of m.room.create event");

View file

@ -733,8 +733,12 @@ where
Fut: Future<Output = Option<E>> + Send,
E: Event + Send + Sync,
{
let mut room_id = None;
while let Some(sort_ev) = event {
debug!(event_id = sort_ev.event_id().as_str(), "mainline");
if room_id.is_none() {
room_id = Some(sort_ev.room_id().to_owned());
}
let id = sort_ev.event_id();
if let Some(depth) = mainline_map.get(id) {
@ -753,7 +757,7 @@ where
}
}
}
// Did not find a power level event so we default to zero
warn!("could not find a power event in the mainline map for {room_id:?}, defaulting to zero depth");
Ok(0)
}

View file

@ -29,7 +29,7 @@ fn descriptor_cf_options(
set_table_options(&mut opts, &desc, cache)?;
opts.set_min_write_buffer_number(1);
opts.set_max_write_buffer_number(2);
opts.set_max_write_buffer_number(3);
opts.set_write_buffer_size(desc.write_size);
opts.set_target_file_size_base(desc.file_size);
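A minimal sketch of the memtable change in this hunk, using the rust-rocksdb Options API: raising max_write_buffer_number from 2 to 3 lets one more immutable memtable (two instead of one) queue behind the active write buffer before writes stall, which is the flood-protection headroom the commit message refers to.

use rocksdb::Options;

fn sketch_write_buffer_opts(write_size: usize) -> Options {
    let mut opts = Options::default();
    opts.set_max_write_buffer_number(3); // was 2 before this change
    opts.set_write_buffer_size(write_size);
    opts
}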

View file

@ -1,8 +1,6 @@
use std::{cmp, convert::TryFrom};
use conduwuit::{Config, Result, utils};
use conduwuit::{Config, Result};
use rocksdb::{Cache, DBRecoveryMode, Env, LogLevel, Options, statistics::StatsLevel};
use conduwuit::config::{parallelism_scaled_i32, parallelism_scaled_u32};
use super::{cf_opts::cache_size_f64, logger::handle as handle_log};
/// Create database-wide options suitable for opening the database. This also
@ -23,8 +21,8 @@ pub(crate) fn db_options(config: &Config, env: &Env, row_cache: &Cache) -> Resul
set_logging_defaults(&mut opts, config);
// Processing
opts.set_max_background_jobs(num_threads::<i32>(config)?);
opts.set_max_subcompactions(num_threads::<u32>(config)?);
opts.set_max_background_jobs(parallelism_scaled_i32(1));
opts.set_max_subcompactions(parallelism_scaled_u32(1));
opts.set_avoid_unnecessary_blocking_io(true);
opts.set_max_file_opening_threads(0);
@ -126,15 +124,3 @@ fn set_logging_defaults(opts: &mut Options, config: &Config) {
opts.set_callback_logger(rocksdb_log_level, &handle_log);
}
}
fn num_threads<T: TryFrom<usize>>(config: &Config) -> Result<T> {
const MIN_PARALLELISM: usize = 2;
let requested = if config.rocksdb_parallelism_threads != 0 {
config.rocksdb_parallelism_threads
} else {
utils::available_parallelism()
};
utils::math::try_into::<T, usize>(cmp::max(MIN_PARALLELISM, requested))
}

View file

@ -5,10 +5,7 @@ use std::{
};
use bytes::Bytes;
use conduwuit::{
Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, debug_error, debug_warn, err,
error::inspect_debug_log, implement, trace, utils::string::EMPTY, warn,
};
use conduwuit::{Err, Error, Result, debug, debug::INFO_SPAN_LEVEL, err, error::inspect_debug_log, implement, trace, utils::string::EMPTY, info};
use http::{HeaderValue, header::AUTHORIZATION};
use ipaddress::IPAddress;
use reqwest::{Client, Method, Request, Response, Url};
@ -197,9 +194,9 @@ fn handle_error(
) -> Result {
if e.is_timeout() || e.is_connect() {
e = e.without_url();
warn!(?url, "network error while sending federation request: {e:?}");
trace!(?url, "network error while sending federation request: {e:?}");
} else if e.is_redirect() {
warn!(
trace!(
method = ?method,
url = ?url,
final_url = ?e.url(),
@ -208,7 +205,7 @@ fn handle_error(
e,
);
} else {
warn!(?url, "failed to send federation request: {e:?}");
trace!(?url, "failed to send federation request: {e:?}");
}
let mut nice_error = "Request failed".to_owned();
@ -217,7 +214,7 @@ fn handle_error(
write!(nice_error, ": {source:?}").expect("writing to string should not fail");
src = source.source();
}
warn!(nice_error, "Federation request error");
info!(nice_error, "Federation request error");
Err(e.into())
}
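The loop above flattens the reqwest error chain into one string before logging. A self-contained sketch of that pattern (the middle of the original loop falls outside the hunk, so the setup here is assumed):

use std::{error::Error, fmt::Write};

fn flatten_error_chain(e: &(dyn Error + 'static)) -> String {
    let mut nice_error = "Request failed".to_owned();
    let mut src = Some(e);
    while let Some(source) = src {
        // Append each cause in the source() chain to the log message.
        write!(nice_error, ": {source:?}").expect("writing to string should not fail");
        src = source.source();
    }
    nice_error
}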

View file

@ -100,7 +100,7 @@ impl Service {
/// Pings the presence of the given user in the given room, setting the
/// specified state.
pub async fn ping_presence(&self, user_id: &UserId, new_state: &PresenceState) -> Result<()> {
const REFRESH_TIMEOUT: u64 = 60 * 1000;
const REFRESH_TIMEOUT: u64 = 60 * 1000 * 4;
let last_presence = self.db.get_presence(user_id).await;
let state_changed = match last_presence {
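Assuming the constant is in milliseconds (consistent with its 60 * 1000 base), the presence refresh window grows from 60 000 ms = 1 minute to 240 000 ms = 4 minutes.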

View file

@ -53,9 +53,9 @@ impl Resolver {
opts.cache_size = config.dns_cache_entries as usize;
opts.preserve_intermediates = true;
opts.negative_min_ttl = Some(Duration::from_secs(config.dns_min_ttl_nxdomain));
opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 30));
opts.negative_max_ttl = Some(Duration::from_secs(60 * 60 * 24));
opts.positive_min_ttl = Some(Duration::from_secs(config.dns_min_ttl));
opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24 * 7));
opts.positive_max_ttl = Some(Duration::from_secs(60 * 60 * 24));
opts.timeout = Duration::from_secs(config.dns_timeout);
opts.attempts = config.dns_attempts as usize;
opts.try_tcp_on_error = config.dns_tcp_fallback;
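In seconds, these Duration::from_secs caps work out as follows: negative_max_ttl drops from 60 * 60 * 24 * 30 = 2 592 000 s (30 days) to 86 400 s (1 day), positive_max_ttl drops from 60 * 60 * 24 * 7 = 604 800 s (7 days) to 86 400 s (1 day), and dns_cache_entries grows tenfold from 32 768 to 327 680.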

View file

@ -4,7 +4,7 @@ use std::{
};
use conduwuit::{
Event, PduEvent, debug, debug_error, debug_warn, implement,
Event, PduEvent, debug, debug_error, implement,
matrix::event::gen_event_id_canonical_json, trace, utils::continue_exponential_backoff_secs,
warn,
};
@ -70,30 +70,6 @@ where
let mut events_all = HashSet::with_capacity(todo_auth_events.len());
while let Some(next_id) = todo_auth_events.pop_front() {
if let Some((time, tries)) = self
.services
.globals
.bad_event_ratelimiter
.read()
.get(&*next_id)
{
// Exponential backoff
const MIN_DURATION: u64 = 60 * 2;
const MAX_DURATION: u64 = 60 * 60 * 8;
if continue_exponential_backoff_secs(
MIN_DURATION,
MAX_DURATION,
time.elapsed(),
*tries,
) {
debug_warn!(
tried = ?*tries,
elapsed = ?time.elapsed(),
"Backing off from {next_id}",
);
continue;
}
}
if events_all.contains(&next_id) {
continue;
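The block removed above consulted the bad_event_ratelimiter before fetching each auth event. A hedged sketch of what a continue_exponential_backoff_secs-style helper typically computes (the real conduwuit implementation may differ): keep backing off while the elapsed time is still inside a per-attempt window that doubles each try, clamped between the minimum and maximum durations.

use std::time::Duration;

fn continue_backoff_secs(min: u64, max: u64, elapsed: Duration, tries: u32) -> bool {
    // Window doubles per attempt, bounded by [min, max]; back off while inside it.
    let window = min.saturating_mul(2u64.saturating_pow(tries)).clamp(min, max);
    elapsed.as_secs() < window
}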

View file

@ -122,11 +122,8 @@ where
}
// The original create event must be in the auth events
if !matches!(
auth_events.get(&(StateEventType::RoomCreate, String::new().into())),
Some(_) | None
) {
return Err!(Request(InvalidParam("Incoming event refers to wrong create event.")));
if !auth_events.contains_key(&(StateEventType::RoomCreate, String::new().into())) {
return Err!(Request(InvalidParam("Incoming event refers to wrong create event. event_id={event_id}")));
}
let state_fetch = |ty: &StateEventType, sk: &str| {
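Why the old guard never fired: `Some(_) | None` covers every Option variant, so the negated matches! was always false and the create-event check was unreachable; the contains_key form enforces it. A minimal demonstration:

// Always returns false, for any Option<T>: the pattern is exhaustive.
fn old_guard<T>(opt: Option<T>) -> bool {
    !matches!(opt, Some(_) | None)
}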

View file

@ -46,7 +46,7 @@ where
{
// Exponential backoff
const MIN_DURATION: u64 = 5 * 60;
const MAX_DURATION: u64 = 60 * 60 * 24;
const MAX_DURATION: u64 = 60 * 60;
if continue_exponential_backoff_secs(MIN_DURATION, MAX_DURATION, time.elapsed(), *tries) {
debug!(
?tries,

View file

@ -5,7 +5,7 @@ use conduwuit::{
matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
trace,
utils::stream::{BroadbandExt, ReadyExt},
warn,
warn
};
use futures::{FutureExt, StreamExt, future::ready};
use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@ -175,7 +175,7 @@ where
let extremities: Vec<_> = self
.services
.state
.get_forward_extremities(room_id)
.get_forward_extremities(room_id, &state_lock)
.map(ToOwned::to_owned)
.ready_filter(|event_id| {
// Remove any that are referenced by this incoming event's prev_events
@ -193,6 +193,8 @@ where
.collect()
.await;
if extremities.len() == 0 { info!("Retained zero extremities when upgrading outlier PDU to timeline PDU with {} previous events, event id: {}", incoming_pdu.prev_events.len(), incoming_pdu.event_id) }
debug!(
"Retained {} extremities checked against {} prev_events",
extremities.len(),

View file

@ -388,6 +388,7 @@ impl Service {
pub fn get_forward_extremities<'a>(
&'a self,
room_id: &'a RoomId,
_state_lock: &'a RoomMutexGuard,
) -> impl Stream<Item = &EventId> + Send + '_ {
let prefix = (room_id, Interfix);
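The added _state_lock parameter acts as a proof-of-lock token: the guard is never used inside the function, but requiring a reference to it means callers cannot enumerate forward extremities without first holding the room state lock. A simplified sketch of the pattern (types are stand-ins, not conduwuit's):

struct RoomMutexGuard; // stand-in for the real per-room mutex guard

fn get_forward_extremities(_room_id: &str, _state_lock: &RoomMutexGuard) -> Vec<String> {
    // The guard is unused; its presence in the signature is the point.
    Vec::new()
}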

View file

@ -1,13 +1,13 @@
use std::iter::once;
use conduwuit_core::{
Result, debug, debug_warn, implement, info,
Result, debug, implement, info,
matrix::{
event::Event,
pdu::{PduCount, PduId, RawPduId},
},
utils::{IterStream, ReadyExt},
validated, warn,
validated,
};
use futures::{FutureExt, StreamExt};
use ruma::{
@ -15,9 +15,7 @@ use ruma::{
api::federation,
events::{
StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
},
uint,
};
}, UInt};
use serde_json::value::RawValue as RawJsonValue;
use super::ExtractBody;
@ -100,7 +98,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
.boxed();
while let Some(ref backfill_server) = servers.next().await {
info!("Asking {backfill_server} for backfill");
info!("Asking {backfill_server} for backfill of room {room_id}");
let response = self
.services
.sending
@ -109,7 +107,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
federation::backfill::get_backfill::v1::Request {
room_id: room_id.to_owned(),
v: vec![first_pdu.1.event_id().to_owned()],
limit: uint!(100),
limit: UInt::from(self.services.server.config.max_fetch_prev_events),
},
)
.await;
@ -117,13 +115,13 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
| Ok(response) => {
for pdu in response.pdus {
if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await {
debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}");
info!("Failed to add backfilled pdu in room {room_id}: {e}");
}
}
return Ok(());
},
| Err(e) => {
warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
info!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
},
}
}

View file

@ -42,7 +42,7 @@ pub async fn create_hash_and_sign_event(
let prev_events: Vec<OwnedEventId> = self
.services
.state
.get_forward_extremities(room_id)
.get_forward_extremities(room_id, _mutex_lock)
.take(20)
.map(Into::into)
.collect()

View file

@ -401,16 +401,10 @@ impl Service {
fn num_senders(args: &crate::Args<'_>) -> usize {
const MIN_SENDERS: usize = 1;
// Limit the number of senders to the number of workers threads or number of
// cores, conservatively.
let max_senders = args
.server
.metrics
.num_workers()
.min(available_parallelism());
// Limit the maximum number of senders to the number of cores.
let max_senders = available_parallelism();
// If the user doesn't override the default 0, this is intended to then default
// to 1 for now as multiple senders is experimental.
// default is 4 senders. clamp between 1 and core count.
args.server
.config
.sender_workers
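The hunk ends mid-expression, so the clamp itself is not shown; a hedged sketch of how it most likely completes, given MIN_SENDERS and the new max_senders (an assumption, not the source):

fn num_senders(configured: usize, available_parallelism: usize) -> usize {
    const MIN_SENDERS: usize = 1;
    // Clamp the configured sender_workers between 1 and the core count.
    let max_senders = available_parallelism;
    configured.clamp(MIN_SENDERS, max_senders)
}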

View file

@ -882,7 +882,7 @@ impl Service {
.execute_on(&self.services.client.sender, &server, request)
.await
.inspect(|_| {
info!(%txn_id, %server, "Sent {} PDUs, {} EDUs", pdu_count, edu_count);
trace!(%txn_id, %server, "Sent {} PDUs, {} EDUs", pdu_count, edu_count);
})
.inspect_err(|e| {
info!(%txn_id, %server, "Failed to send transaction ({} PDUs, {} EDUs): {e:?}", pdu_count, edu_count);
@ -890,7 +890,7 @@ impl Service {
for (event_id, result) in result.iter().flat_map(|resp| resp.pdus.iter()) {
if let Err(e) = result {
warn!(
trace!(
%txn_id, %server,
"error sending PDU {event_id} to remote server: {e:?}"
);