Mirror of https://forgejo.ellis.link/continuwuation/continuwuity.git (synced 2025-09-11 13:23:01 +02:00)
Compare commits: 11 commits, 83a97e63d2 ... 676bfffd1d
Author | SHA1 | Date
---|---|---
 | 676bfffd1d |
 | 2b5bbbb638 |
 | 2e4d4d09c8 |
 | 1186eace4d |
 | 9093ee2485 |
 | 7923e05982 |
 | 879a59a1e4 |
 | 864704a536 |
 | 17fb69e3c2 |
 | feeb418e81 |
 | 6e7390a9b7 |
6 changed files with 3902 additions and 75 deletions
2765 | src/api/client/membership.rs | Normal file
File diff suppressed because it is too large.
@@ -275,9 +275,11 @@ where
 	if room_id_server_name != room_create_event.sender().server_name() {
 		warn!(
-			"servername of room ID origin ({}) does not match servername of m.room.create sender ({})",
+			"servername of room ID origin ({}) does not match servername of m.room.create \
+			 sender ({})",
 			room_id_server_name,
-			room_create_event.sender().server_name());
+			room_create_event.sender().server_name()
+		);
 		return Ok(false);
 	}
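Only the formatting of the warning changes here: in Rust, a backslash at the end of a line inside a string literal consumes the newline and any leading whitespace that follows, so the wrapped message is byte-for-byte the same as the single-line original. A minimal standalone sketch of that rule:

```rust
fn main() {
    // A trailing backslash inside a string literal eats the newline
    // and the leading whitespace of the next line.
    let single = "servername of room ID origin ({}) does not match servername of m.room.create sender ({})";
    let wrapped = "servername of room ID origin ({}) does not match servername of m.room.create \
                   sender ({})";
    assert_eq!(single, wrapped); // identical strings; only the source layout differs
}
```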
@@ -616,7 +616,7 @@ where
 			.map(ToOwned::to_owned),
 		)
 	};
 
+	debug!("running auth check on {:?}", event.event_id());
 	let auth_result =
 		auth_check(room_version, &event, current_third_party, fetch_state).await;
@@ -735,7 +735,7 @@ where
 {
 	let mut room_id = None;
 	while let Some(sort_ev) = event {
-		debug!(event_id = sort_ev.event_id().as_str(), "mainline");
+		trace!(event_id = sort_ev.event_id().as_str(), "mainline");
 		if room_id.is_none() {
 			room_id = Some(sort_ev.room_id().to_owned());
 		}
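This hunk demotes the per-iteration mainline log from `debug!` to `trace!`. A minimal sketch of the same structured-field logging, assuming the stock `tracing` and `tracing-subscriber` crates rather than conduwuit's re-exported macros, with a made-up event ID:

```rust
use tracing::{Level, debug, trace};

fn visit_event(event_id: &str) {
    // `event_id` is recorded as a structured field, not interpolated text;
    // `trace!` only emits when the TRACE level is enabled.
    trace!(event_id, "mainline");
    debug!(event_id, "running auth check");
}

fn main() {
    // Show everything down to TRACE for the demo; production would filter.
    tracing_subscriber::fmt().with_max_level(Level::TRACE).init();
    visit_event("$abc123:example.org");
}
```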
@@ -80,7 +80,7 @@ where
 	// 5. Reject "due to auth events" if can't get all the auth events or some of
 	// the auth events are also rejected "due to auth events"
 	// NOTE: Step 5 is not applied anymore because it failed too often
-	debug!("Fetching auth events");
+	debug!("Fetching auth events for {}", incoming_pdu.event_id);
 	Box::pin(self.fetch_and_handle_outliers(
 		origin,
 		pdu_event.auth_events(),
@@ -92,12 +92,12 @@ where
 
 	// 6. Reject "due to auth events" if the event doesn't pass auth based on the
 	// auth events
-	debug!("Checking based on auth events");
+	debug!("Checking {} based on auth events", incoming_pdu.event_id);
 	// Build map of auth events
 	let mut auth_events = HashMap::with_capacity(pdu_event.auth_events().count());
 	for id in pdu_event.auth_events() {
 		let Ok(auth_event) = self.services.timeline.get_pdu(id).await else {
-			warn!("Could not find auth event {id}");
+			warn!("Could not find auth event {id} for {}", incoming_pdu.event_id);
 			continue;
 		};
@@ -131,6 +131,7 @@ where
 		ready(auth_events.get(&key).map(ToOwned::to_owned))
 	};
 
+	debug!("running auth check to handle outlier pdu {:?}", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&to_room_version(&room_version_id),
 		&pdu_event,
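The surrounding code hands `auth_check` a lookup closure over the prebuilt `auth_events` map; since the checker expects a future, the synchronous lookup is wrapped in `futures::future::ready`. A self-contained sketch of that pattern, with toy key and event-ID types standing in for the real Matrix ones:

```rust
use std::collections::HashMap;

use futures::{executor::block_on, future::ready};

// Toy stand-ins: the real code keys on (event type, state key) and stores PDUs.
type Key = (String, String);
type EventId = String;

// An auth checker that awaits each state lookup, mirroring the async API shape.
async fn auth_check<F, Fut>(fetch_state: F) -> bool
where
    F: Fn(Key) -> Fut,
    Fut: std::future::Future<Output = Option<EventId>>,
{
    fetch_state(("m.room.create".into(), String::new())).await.is_some()
}

fn main() {
    let mut auth_events: HashMap<Key, EventId> = HashMap::new();
    auth_events.insert(("m.room.create".into(), String::new()), "$create:example.org".into());

    // A synchronous map lookup lifted into a Future with `ready`.
    let fetch = |key: Key| ready(auth_events.get(&key).cloned());

    assert!(block_on(auth_check(fetch)));
}
```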
@@ -1,12 +1,6 @@
 use std::{borrow::Borrow, collections::BTreeMap, iter::once, sync::Arc, time::Instant};
 
-use conduwuit::{
-	Err, Result, debug, debug_info, err, implement, is_equal_to,
-	matrix::{Event, EventTypeExt, PduEvent, StateKey, state_res},
-	trace,
-	utils::stream::{BroadbandExt, ReadyExt},
-	warn,
-};
+use conduwuit::{Err, Result, debug, debug_info, err, implement, matrix::{EventTypeExt, PduEvent, StateKey, state_res}, trace, utils::stream::{BroadbandExt, ReadyExt}, warn, info};
 use futures::{FutureExt, StreamExt, future::ready};
 use ruma::{CanonicalJsonValue, RoomId, ServerName, events::StateEventType};
@@ -17,22 +11,19 @@ use crate::rooms::{
 };
 
 #[implement(super::Service)]
-pub(super) async fn upgrade_outlier_to_timeline_pdu<Pdu>(
+pub(super) async fn upgrade_outlier_to_timeline_pdu(
 	&self,
 	incoming_pdu: PduEvent,
 	val: BTreeMap<String, CanonicalJsonValue>,
-	create_event: &Pdu,
+	create_event: &PduEvent,
 	origin: &ServerName,
 	room_id: &RoomId,
-) -> Result<Option<RawPduId>>
-where
-	Pdu: Event + Send + Sync,
-{
+) -> Result<Option<RawPduId>> {
 	// Skip the PDU if we already have it as a timeline event
 	if let Ok(pduid) = self
 		.services
 		.timeline
-		.get_pdu_id(incoming_pdu.event_id())
+		.get_pdu_id(&incoming_pdu.event_id)
 		.await
 	{
 		return Ok(Some(pduid));
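This hunk (and the rest of the file) retreats from a generic `Pdu: Event` parameter back to the concrete `PduEvent`, replacing trait accessors such as `event_id()` with direct field access. A minimal standalone sketch of the two styles, using toy types rather than conduwuit's real `Event` trait:

```rust
// Toy stand-ins for illustration only.
struct PduEvent {
    event_id: String,
}

trait Event {
    fn event_id(&self) -> &str;
}

impl Event for PduEvent {
    fn event_id(&self) -> &str { &self.event_id }
}

// Generic style: accepts anything implementing the trait, monomorphized per type.
fn handle_generic<Pdu: Event + Send + Sync>(pdu: &Pdu) -> &str {
    pdu.event_id()
}

// Concrete style: one type, direct field access, simpler signatures and errors.
fn handle_concrete(pdu: &PduEvent) -> &str {
    &pdu.event_id
}

fn main() {
    let pdu = PduEvent { event_id: "$e:example.org".into() };
    assert_eq!(handle_generic(&pdu), handle_concrete(&pdu));
}
```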
@@ -41,13 +32,13 @@ where
 	if self
 		.services
 		.pdu_metadata
-		.is_event_soft_failed(incoming_pdu.event_id())
+		.is_event_soft_failed(&incoming_pdu.event_id)
 		.await
 	{
 		return Err!(Request(InvalidParam("Event has been soft failed")));
 	}
 
-	debug!("Upgrading to timeline pdu");
+	debug!("Upgrading pdu {} from outlier to timeline pdu", incoming_pdu.event_id);
 	let timer = Instant::now();
 	let room_version_id = get_room_version_id(create_event)?;
@@ -55,8 +46,8 @@ where
 	// backwards extremities doing all the checks in this list starting at 1.
 	// These are not timeline events.
 
-	debug!("Resolving state at event");
-	let mut state_at_incoming_event = if incoming_pdu.prev_events().count() == 1 {
+	debug!("Resolving state at event {}", incoming_pdu.event_id);
+	let mut state_at_incoming_event = if incoming_pdu.prev_events.len() == 1 {
 		self.state_at_incoming_degree_one(&incoming_pdu).await?
 	} else {
 		self.state_at_incoming_resolved(&incoming_pdu, room_id, &room_version_id)
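Swapping `prev_events().count()` for `prev_events.len()` falls out of the return to the concrete struct: an iterator's `count()` walks every element, while `Vec::len()` reads a stored value in O(1). A quick standalone illustration:

```rust
fn main() {
    let prev_events: Vec<String> = vec!["$a:x".into(), "$b:x".into()];

    // Iterator style: consumes an iterator and counts elements, O(n).
    let via_iter = prev_events.iter().count();

    // Field style: Vec tracks its own length, O(1).
    let via_len = prev_events.len();

    assert_eq!(via_iter, via_len);
    assert_ne!(via_len, 1); // the degree-one fast path would not apply here
}
```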
@@ -65,16 +56,15 @@ where
 
 	if state_at_incoming_event.is_none() {
 		state_at_incoming_event = self
-			.fetch_state(origin, create_event, room_id, incoming_pdu.event_id())
+			.fetch_state(origin, create_event, room_id, &incoming_pdu.event_id)
 			.await?;
 	}
 
-	let state_at_incoming_event =
-		state_at_incoming_event.expect("we always set this to some above");
+	let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above");
 
 	let room_version = to_room_version(&room_version_id);
 
-	debug!("Performing auth check");
+	debug!("Performing auth check to upgrade {}", incoming_pdu.event_id);
 	// 11. Check the auth of the event passes based on the state of the event
 	let state_fetch_state = &state_at_incoming_event;
 	let state_fetch = |k: StateEventType, s: StateKey| async move {
@@ -84,6 +74,7 @@ where
 		self.services.timeline.get_pdu(event_id).await.ok()
 	};
 
+	debug!("running auth check on {}", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&room_version,
 		&incoming_pdu,
@@ -97,24 +88,25 @@ where
 		return Err!(Request(Forbidden("Event has failed auth check with state at the event.")));
 	}
 
-	debug!("Gathering auth events");
+	debug!("Gathering auth events for {}", incoming_pdu.event_id);
 	let auth_events = self
 		.services
 		.state
 		.get_auth_events(
 			room_id,
-			incoming_pdu.kind(),
-			incoming_pdu.sender(),
-			incoming_pdu.state_key(),
-			incoming_pdu.content(),
+			&incoming_pdu.kind,
+			&incoming_pdu.sender,
+			incoming_pdu.state_key.as_deref(),
+			&incoming_pdu.content,
 		)
 		.await?;
 
 	let state_fetch = |k: &StateEventType, s: &str| {
 		let key = k.with_state_key(s);
-		ready(auth_events.get(&key).map(ToOwned::to_owned))
+		ready(auth_events.get(&key).cloned())
 	};
 
+	debug!("running auth check on {} with claimed state auth", incoming_pdu.event_id);
 	let auth_check = state_res::event_auth::auth_check(
 		&room_version,
 		&incoming_pdu,
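Two idioms recur in this hunk: `Option<String>::as_deref()` turns owned optional data into the `Option<&str>` an API expects, and `Option<&T>::cloned()` is the tidier equivalent of `.map(ToOwned::to_owned)` when the target type is `Clone`. A standalone illustration:

```rust
use std::collections::HashMap;

fn main() {
    // as_deref: borrow the inner String as &str without moving it.
    let state_key: Option<String> = Some("@alice:example.org".to_owned());
    let borrowed: Option<&str> = state_key.as_deref();
    assert_eq!(borrowed, Some("@alice:example.org"));

    // cloned vs map(ToOwned::to_owned): identical results on Option<&T>.
    let mut auth_events: HashMap<&str, String> = HashMap::new();
    auth_events.insert("m.room.create", "$create:example.org".to_owned());
    let a: Option<String> = auth_events.get("m.room.create").cloned();
    let b: Option<String> = auth_events.get("m.room.create").map(ToOwned::to_owned);
    assert_eq!(a, b);
}
```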
@@ -125,7 +117,7 @@ where
 		.map_err(|e| err!(Request(Forbidden("Auth check failed: {e:?}"))))?;
 
 	// Soft fail check before doing state res
-	debug!("Performing soft-fail check");
+	debug!("Performing soft-fail check on {}", incoming_pdu.event_id);
 	let soft_fail = match (auth_check, incoming_pdu.redacts_id(&room_version_id)) {
 		| (false, _) => true,
 		| (true, None) => false,
@@ -133,7 +125,7 @@ where
 			!self
 				.services
 				.state_accessor
-				.user_can_redact(&redact_id, incoming_pdu.sender(), incoming_pdu.room_id(), true)
+				.user_can_redact(&redact_id, &incoming_pdu.sender, &incoming_pdu.room_id, true)
 				.await?,
 	};
 
@@ -153,7 +145,7 @@ where
 		.map(ToOwned::to_owned)
 		.ready_filter(|event_id| {
 			// Remove any that are referenced by this incoming event's prev_events
-			!incoming_pdu.prev_events().any(is_equal_to!(event_id))
+			!incoming_pdu.prev_events.contains(event_id)
 		})
 		.broad_filter_map(|event_id| async move {
 			// Only keep those extremities were not referenced yet
@@ -172,7 +164,7 @@ where
 	debug!(
 		"Retained {} extremities checked against {} prev_events",
 		extremities.len(),
-		incoming_pdu.prev_events().count()
+		incoming_pdu.prev_events.len()
 	);
 
 	let state_ids_compressed: Arc<CompressedState> = self
@@ -187,20 +179,20 @@ where
 		.map(Arc::new)
 		.await;
 
-	if incoming_pdu.state_key().is_some() {
+	if incoming_pdu.state_key.is_some() {
 		debug!("Event is a state-event. Deriving new room state");
 
 		// We also add state after incoming event to the fork states
 		let mut state_after = state_at_incoming_event.clone();
-		if let Some(state_key) = incoming_pdu.state_key() {
+		if let Some(state_key) = &incoming_pdu.state_key {
 			let shortstatekey = self
 				.services
 				.short
-				.get_or_create_shortstatekey(&incoming_pdu.kind().to_string().into(), state_key)
+				.get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)
 				.await;
 
-			let event_id = incoming_pdu.event_id();
-			state_after.insert(shortstatekey, event_id.to_owned());
+			let event_id = &incoming_pdu.event_id;
+			state_after.insert(shortstatekey, event_id.clone());
 		}
 
 		let new_room_state = self
@@ -224,7 +216,7 @@ where
 	// 14. Check if the event passes auth based on the "current state" of the room,
 	// if not soft fail it
 	if soft_fail {
-		debug!("Soft failing event");
+		info!("Soft failing event {}", incoming_pdu.event_id);
 		let extremities = extremities.iter().map(Borrow::borrow);
 
 		self.services
@@ -242,9 +234,9 @@ where
 		// Soft fail, we keep the event as an outlier but don't add it to the timeline
 		self.services
 			.pdu_metadata
-			.mark_event_soft_failed(incoming_pdu.event_id());
+			.mark_event_soft_failed(&incoming_pdu.event_id);
 
-		warn!("Event was soft failed: {:?}", incoming_pdu.event_id());
+		warn!("Event was soft failed: {incoming_pdu:?}");
 		return Err!(Request(InvalidParam("Event has been soft failed")));
 	}
@@ -255,7 +247,7 @@ where
 	let extremities = extremities
 		.iter()
 		.map(Borrow::borrow)
-		.chain(once(incoming_pdu.event_id()));
+		.chain(once(incoming_pdu.event_id.borrow()));
 
 	let pdu_id = self
 		.services
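The extremity set is extended with the incoming event itself by chaining a one-element iterator; `Borrow::borrow` unifies the owned IDs in the collection with the borrowed form the chain needs. A standalone sketch using plain `String` IDs in place of the real Matrix event-ID types:

```rust
use std::{borrow::Borrow, iter::once};

fn main() {
    let extremities: Vec<String> = vec!["$a:x".into(), "$b:x".into()];
    let incoming_event_id: String = "$c:x".into();

    // Borrow each owned String as &str, then append the incoming ID.
    let all: Vec<&str> = extremities
        .iter()
        .map(Borrow::borrow)
        .chain(once(incoming_event_id.borrow()))
        .collect();

    assert_eq!(all, ["$a:x", "$b:x", "$c:x"]);
}
```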
File diff suppressed because it is too large.