Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-06-30 12:24:28 +02:00)

Compare commits: 2a66f81040...d54d2fdedc (4 commits)

- d54d2fdedc
- 3fe99fbfe7
- 51185daca2
- 6e438c8448
7 changed files with 25 additions and 30 deletions
```diff
@@ -1249,6 +1249,7 @@ async fn join_room_by_id_helper_remote(
 		services.rooms.timeline.get_pdu(event_id).await.ok()
 	};
 
+	debug!("running stateres check on send_join parsed PDU");
 	let auth_check = state_res::event_auth::auth_check(
 		&state_res::RoomVersion::new(&room_version_id)?,
 		&parsed_join_pdu,
```
```diff
@@ -1828,7 +1828,7 @@ pub struct Config {
 	/// setting a non-zero value.
 	///
 	/// default: 0
-	#[serde(default)]
+	#[serde(default = "default_sender_workers")]
 	pub sender_workers: usize,
 
 	/// Enables listener sockets; can be set to false to disable listening. This
@@ -2315,6 +2315,8 @@ fn default_stream_width_scale() -> f32 { 1.0 }
 
 fn default_stream_amplification() -> usize { 1024 }
 
+fn default_sender_workers() -> usize { 4 }
+
 fn default_client_receive_timeout() -> u64 { 75 }
 
 fn default_client_request_timeout() -> u64 { 180 }
```
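The two config hunks above change how `sender_workers` is defaulted: `#[serde(default)]` (which yields `usize::default()`, i.e. 0) is replaced by `#[serde(default = "default_sender_workers")]`, and the new function returns 4. A minimal, self-contained sketch of that serde pattern (the `Config` struct here is a stand-in for illustration, not the real type, and the `toml` crate is assumed for parsing):

```rust
use serde::Deserialize;

fn default_sender_workers() -> usize { 4 }

#[derive(Deserialize, Debug)]
struct Config {
	// Missing key => serde calls default_sender_workers() instead of
	// falling back to usize::default() (0).
	#[serde(default = "default_sender_workers")]
	sender_workers: usize,
}

fn main() {
	// Key omitted: the default function supplies 4.
	let cfg: Config = toml::from_str("").unwrap();
	assert_eq!(cfg.sender_workers, 4);

	// Key present: the configured value still wins.
	let cfg: Config = toml::from_str("sender_workers = 8").unwrap();
	assert_eq!(cfg.sender_workers, 8);
}
```

With this in place, operators who leave `sender_workers` unset get 4 sender workers, while an explicit setting is respected unchanged.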
```diff
@@ -609,7 +609,7 @@ where
 	let fetch_state = |ty: &StateEventType, key: &str| {
 		future::ready(auth_state.get(&ty.with_state_key(key)))
 	};
 
+	debug!("running auth check on {:?}", event.event_id());
 	let auth_result =
 		auth_check(room_version, &event, current_third_party.as_ref(), fetch_state).await;
@@ -728,7 +728,7 @@ where
 	{
 		let mut room_id = None;
 		while let Some(sort_ev) = event {
-			debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+			trace!(event_id = sort_ev.event_id().as_str(), "mainline");
 			if room_id.is_none() {
 				room_id = Some(sort_ev.room_id().to_owned());
 			}
```
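The logging changes use both styles offered by the `tracing` macros: positional/inline formatting (as in the new `debug!` before the auth check) and structured key/value fields (as in the mainline loop's log, which this hunk demotes from `debug!` to `trace!`, presumably because it fires once per event during mainline sorting). A small illustrative sketch of the two forms, not the server's actual code:

```rust
use tracing::{debug, trace};

fn log_auth_progress(event_id: &str) {
	// Positional/inline formatting: the value is interpolated into the message.
	debug!("running auth check on {event_id}");

	// Structured field shorthand: `event_id` is recorded as a key/value pair
	// that subscribers can filter on; trace! keeps this per-event log out of
	// normal debug-level output.
	trace!(event_id, "mainline");
}
```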
```diff
@@ -76,7 +76,7 @@ pub(super) async fn handle_outlier_pdu<'a>(
 	// 5. Reject "due to auth events" if can't get all the auth events or some of
 	// the auth events are also rejected "due to auth events"
 	// NOTE: Step 5 is not applied anymore because it failed too often
-	debug!("Fetching auth events");
+	debug!("Fetching auth events for {}", incoming_pdu.event_id);
 	Box::pin(self.fetch_and_handle_outliers(
 		origin,
 		&incoming_pdu.auth_events,
@@ -88,12 +88,12 @@ pub(super) async fn handle_outlier_pdu<'a>(
 
 	// 6. Reject "due to auth events" if the event doesn't pass auth based on the
 	// auth events
-	debug!("Checking based on auth events");
+	debug!("Checking {} based on auth events", incoming_pdu.event_id);
 	// Build map of auth events
 	let mut auth_events = HashMap::with_capacity(incoming_pdu.auth_events.len());
 	for id in &incoming_pdu.auth_events {
 		let Ok(auth_event) = self.services.timeline.get_pdu(id).await else {
-			warn!("Could not find auth event {id}");
+			warn!("Could not find auth event {id} for {}", incoming_pdu.event_id);
 			continue;
 		};
 
@@ -128,6 +128,7 @@ pub(super) async fn handle_outlier_pdu<'a>(
 		ready(auth_events.get(&key))
 	};
 
+	debug!("running auth check to handle outlier pdu {:?}", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&to_room_version(&room_version_id),
 		&incoming_pdu,
```
```diff
@@ -1,12 +1,6 @@
 use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant};
 
-use conduwuit::{
-	Err, Result, debug, debug_info, err, implement,
-	matrix::{EventTypeExt, PduEvent, StateKey, state_res},
-	trace,
-	utils::stream::{BroadbandExt, ReadyExt},
-	warn,
-};
+use conduwuit::{Err, Result, debug, debug_info, err, implement, matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, info};
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
 
@@ -44,7 +38,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		return Err!(Request(InvalidParam("Event has been soft failed")));
 	}
 
-	debug!("Upgrading to timeline pdu");
+	debug!("Upgrading pdu {} from outlier to timeline pdu", incoming_pdu.event_id);
 	let timer = Instant::now();
 	let room_version_id = get_room_version_id(create_event)?;
 
@@ -52,7 +46,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 	// backwards extremities doing all the checks in this list starting at 1.
 	// These are not timeline events.
 
-	debug!("Resolving state at event");
+	debug!("Resolving state at event {}", incoming_pdu.event_id);
 	let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 {
 		self.state_at_incoming_degree_one(&incoming_pdu).await?
 	} else {
@@ -70,7 +64,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		state_at_incoming_event.expect("we always set this to some above");
 	let room_version = to_room_version(&room_version_id);
 
-	debug!("Performing auth check");
+	debug!("Performing auth check to upgrade {}", incoming_pdu.event_id);
 	// 11. Check the auth of the event passes based on the state of the event
 	let state_fetch_state = &state_at_incoming_event;
 	let state_fetch = |k: StateEventType, s: StateKey| async move {
@@ -80,6 +74,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		self.services.timeline.get_pdu(event_id).await.ok()
 	};
 
+	debug!("running auth check on {}", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&room_version,
 		&incoming_pdu,
@@ -93,7 +88,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		return Err!(Request(Forbidden("Event has failed auth check with state at the event.")));
 	}
 
-	debug!("Gathering auth events");
+	debug!("Gathering auth events for {}", incoming_pdu.event_id);
 	let auth_events = self
 		.services
 		.state
@@ -111,6 +106,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		ready(auth_events.get(&key).cloned())
 	};
 
+	debug!("running auth check on {} with claimed state auth", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&room_version,
 		&incoming_pdu,
@@ -121,7 +117,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 		.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
 
 	// Soft fail check before doing state res
-	debug!("Performing soft-fail check");
+	debug!("Performing soft-fail check on {}", incoming_pdu.event_id);
 	let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) {
 		| (false, _) => true,
 		| (true, None) => false,
@@ -218,7 +214,7 @@ pub(super) async fn upgrade_outlier_to_timeline_pdu(
 	// 14. Check if the event passes auth based on the "current state" of the room,
 	// if not soft fail it
 	if soft_fail {
-		debug!("Soft failing event");
+		info!("Soft failing event {}", incoming_pdu.event_id);
 		let extremities = extremities.iter().map(Borrow::borrow);
 
 		self.services
```
```diff
@@ -771,6 +771,7 @@ impl Service {
 			ready(auth_events.get(&key))
 		};
 
+		debug!("running auth check on new {} event by {} in {}", pdu.kind, pdu.sender, pdu.room_id);
 		let auth_check = state_res::auth_check(
 			&room_version,
 			&pdu,
@@ -1152,7 +1153,7 @@ impl Service {
 			.boxed();
 
 		while let Some(ref backfill_server) = servers.next().await {
-			info!("Asking {backfill_server} for backfill");
+			info!("Asking {backfill_server} for backfill in {:?}", room_id.to_owned());
 			let response = self
 				.services
 				.sending
@@ -1180,7 +1181,7 @@ impl Service {
 			}
 		}
 
-		info!("No servers could backfill, but backfill was needed in room {room_id}");
+		warn!("No servers could backfill, but backfill was needed in room {room_id}");
 		Ok(())
 	}
```
```diff
@@ -401,16 +401,10 @@ impl Service {
 
 fn num_senders(args: &crate::Args<'_>) -> usize {
 	const MIN_SENDERS: usize = 1;
-	// Limit the number of senders to the number of workers threads or number of
-	// cores, conservatively.
-	let max_senders = args
-		.server
-		.metrics
-		.num_workers()
-		.min(available_parallelism());
+	// Limit the maximum number of senders to the number of cores.
+	let max_senders = available_parallelism();
 
-	// If the user doesn't override the default 0, this is intended to then default
-	// to 1 for now as multiple senders is experimental.
+	// default is 4 senders. clamp between 1 and core count.
 	args.server
 		.config
 		.sender_workers
```
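Taken together with `default_sender_workers` above, this last hunk changes the sender-count policy: the cap is now simply the core count rather than the runtime's worker-thread metric, and the configured value (default 4) is presumably clamped between `MIN_SENDERS` and that cap (the trailing call on `sender_workers` is cut off in the hunk, so the clamp is an assumption). A standalone sketch of that policy, with `available_parallelism` modeled on `std::thread::available_parallelism` as a stand-in for the server's helper:

```rust
use std::thread;

// Stand-in for the server's available_parallelism() utility.
fn available_parallelism() -> usize {
	thread::available_parallelism().map_or(1, |n| n.get())
}

// Hypothetical free-function version of num_senders(): clamp the configured
// sender_workers value (default 4) between 1 and the machine's core count.
fn num_senders(sender_workers: usize) -> usize {
	const MIN_SENDERS: usize = 1;
	let max_senders = available_parallelism();
	sender_workers.clamp(MIN_SENDERS, max_senders)
}

fn main() {
	// With the new default of 4, a machine with at least 4 cores runs
	// 4 senders; a dual-core machine is capped at 2; 0 is raised to 1.
	println!("senders = {}", num_senders(4));
}
```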