Compare commits

...

8 commits

11 changed files with 139 additions and 61 deletions

View file

@@ -7,5 +7,6 @@
"continuwuity",
"homeserver",
"homeservers"
]
],
"rust-analyzer.cargo.features": ["full"]
}

View file

@@ -281,15 +281,8 @@ pub(super) async fn get_remote_pdu(
vec![(event_id, value, room_id)]
};
info!("Attempting to handle event ID {event_id} as backfilled PDU");
self.services
.rooms
.timeline
.backfill_pdu(&server, response.pdu)
.await?;
let text = serde_json::to_string_pretty(&json)?;
let msg = "Got PDU from specified server and handled as backfilled";
let msg = "Got PDU from specified server:";
write!(self, "{msg}. Event body:\n```json\n{text}\n```")
},
}

View file

@@ -57,5 +57,5 @@ pub(super) async fn pdus(
.try_collect()
.await?;
self.write_str(&format!("{result:#?}")).await
self.write_str(&format!("```\n{result:#?}\n```")).await
}

View file

@@ -35,7 +35,6 @@ use ruma::{
};
use tracing::warn;
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma;
/// list of safe and common non-state events to ignore if the user is ignored
@@ -85,14 +84,14 @@ pub(crate) async fn get_message_events_route(
let from: PduCount = body
.from
.as_deref()
.map(parse_token)
.map(str::parse)
.transpose()?
.unwrap_or_else(|| match body.dir {
| Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(),
});
let to: Option<PduCount> = body.to.as_deref().map(parse_token).transpose()?;
let to: Option<PduCount> = body.to.as_deref().map(str::parse).transpose()?;
let limit: usize = body
.limit
@@ -181,8 +180,8 @@ pub(crate) async fn get_message_events_route(
.collect();
Ok(get_message_events::v3::Response {
start: count_to_token(from),
end: next_token.map(count_to_token),
start: from.to_string(),
end: next_token.as_ref().map(PduCount::to_string),
chunk,
state,
})

View file

@@ -37,7 +37,6 @@ pub(super) mod typing;
pub(super) mod unstable;
pub(super) mod unversioned;
pub(super) mod user_directory;
pub(super) mod utils;
pub(super) mod voip;
pub(super) mod well_known;

View file

@@ -18,7 +18,6 @@ use ruma::{
events::{TimelineEventType, relation::RelationType},
};
use super::utils::{count_to_token, parse_pagination_token as parse_token};
use crate::Ruma;
/// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
@@ -111,14 +110,14 @@ async fn paginate_relations_with_filter(
dir: Direction,
) -> Result<get_relating_events::v1::Response> {
let start: PduCount = from
.map(parse_token)
.map(str::parse)
.transpose()?
.unwrap_or_else(|| match dir {
| Direction::Forward => PduCount::min(),
| Direction::Backward => PduCount::max(),
});
let to: Option<PduCount> = to.map(parse_token).transpose()?;
let to: Option<PduCount> = to.map(str::parse).transpose()?;
// Use limit or else 30, with maximum 100
let limit: usize = limit
@@ -193,7 +192,7 @@ async fn paginate_relations_with_filter(
| Direction::Forward => events.last(),
| Direction::Backward => events.first(),
}
.map(|(count, _)| count_to_token(*count))
.map(|(count, _)| count.to_string())
} else {
None
};

View file

@@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route(
let event = services
.rooms
.timeline
.get_pdu(event_id)
.get_remote_pdu(room_id, event_id)
.map_err(|_| err!(Request(NotFound("Event {} not found.", event_id))));
let visible = services
@@ -33,11 +33,6 @@ pub(crate) async fn get_room_event_route(
return Err!(Request(Forbidden("You don't have permission to view this event.")));
}
debug_assert!(
event.event_id() == event_id && event.room_id() == room_id,
"Fetched PDU must match requested"
);
event.add_age().ok();
Ok(get_room_event::v3::Response { event: event.into_format() })

View file

@@ -1,28 +0,0 @@
use conduwuit::{
Result, err,
matrix::pdu::{PduCount, ShortEventId},
};
/// Parse a pagination token, trying ShortEventId first, then falling back to
/// PduCount
pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
	// ShortEventId maps directly to a PduCount in our database, so prefer it.
	if let Ok(shorteventid) = token.parse::<ShortEventId>() {
		return Ok(PduCount::Normal(shorteventid));
	}
	// Fallback to a plain unsigned PduCount for backwards compatibility.
	if let Ok(count) = token.parse::<u64>() {
		return Ok(PduCount::Normal(count));
	}
	// Finally, accept signed counts to cover backfilled (negative) events.
	match token.parse::<i64>() {
		| Ok(count) => Ok(PduCount::from_signed(count)),
		| Err(_) => Err(err!(Request(InvalidParam("Invalid pagination token")))),
	}
}
/// Convert a PduCount to a token string (using the underlying ShortEventId)
pub(crate) fn count_to_token(count: PduCount) -> String {
	// The PduCount's unsigned value IS the ShortEventId.
	let shorteventid = count.into_unsigned();
	shorteventid.to_string()
}

View file

@@ -1,6 +1,10 @@
#![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)]
use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr};
use std::{
cmp::Ordering,
fmt::{self, Display},
str::FromStr,
};
use ruma::api::Direction;

View file

@@ -1,7 +1,8 @@
use std::iter::once;
use conduwuit::{Err, PduEvent};
use conduwuit_core::{
Result, debug, debug_warn, implement, info,
Result, debug, debug_warn, err, implement, info,
matrix::{
event::Event,
pdu::{PduCount, PduId, RawPduId},
@@ -11,7 +12,7 @@ use conduwuit_core::{
};
use futures::{FutureExt, StreamExt};
use ruma::{
RoomId, ServerName,
CanonicalJsonObject, EventId, RoomId, ServerName,
api::federation,
events::{
StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
@@ -100,7 +101,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
.boxed();
while let Some(ref backfill_server) = servers.next().await {
info!("Asking {backfill_server} for backfill");
info!("Asking {backfill_server} for backfill in {room_id}");
let response = self
.services
.sending
@@ -128,10 +129,126 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
}
}
info!("No servers could backfill, but backfill was needed in room {room_id}");
warn!("No servers could backfill, but backfill was needed in room {room_id}");
Ok(())
}
/// Fetch a single PDU, serving the local copy when available and otherwise
/// asking remote servers for the event.
///
/// Candidate servers are gathered similarly to `backfill_if_required`: servers
/// of remote users with elevated power levels, the canonical alias server, and
/// the configured trusted servers — filtered to servers actually in the room.
/// A successfully fetched event is run through the incoming-PDU handler before
/// the stored copy is returned.
///
/// Errors when the room is empty and not world-readable (no one can serve the
/// event), when incoming-PDU handling fails, or when no candidate server
/// produced a usable PDU.
#[implement(super::Service)]
#[tracing::instrument(name = "get_remote_pdu", level = "debug", skip(self))]
pub async fn get_remote_pdu(&self, room_id: &RoomId, event_id: &EventId) -> Result<PduEvent> {
	let local = self.get_pdu(event_id).await;
	if local.is_ok() {
		// We already have this PDU, no need to backfill
		debug!("We already have {event_id} in {room_id}, no need to backfill.");
		return local;
	}

	debug!("Preparing to fetch event {event_id} in room {room_id} from remote servers.");
	// Similar to backfill_if_required, but only for a single PDU
	// Fetch a list of servers to try
	if self
		.services
		.state_cache
		.room_joined_count(room_id)
		.await
		.is_ok_and(|count| count <= 1)
		&& !self
			.services
			.state_accessor
			.is_world_readable(room_id)
			.await
	{
		// Room is empty (1 user or none), there is no one that can backfill
		return Err!(Request(NotFound("No one can backfill this PDU, room is empty.")));
	}

	let power_levels: RoomPowerLevelsEventContent = self
		.services
		.state_accessor
		.room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
		.await
		.unwrap_or_default();

	// Servers of remote users with above-default power levels (moderators) are
	// the most likely to be able to serve the event.
	let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| {
		if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) {
			Some(user_id.server_name())
		} else {
			None
		}
	});

	let canonical_room_alias_server = once(
		self.services
			.state_accessor
			.get_canonical_alias(room_id)
			.await,
	)
	.filter_map(Result::ok)
	.map(|alias| alias.server_name().to_owned())
	.stream();

	let mut servers = room_mods
		.stream()
		.map(ToOwned::to_owned)
		.chain(canonical_room_alias_server)
		.chain(
			self.services
				.server
				.config
				.trusted_servers
				.iter()
				.map(ToOwned::to_owned)
				.stream(),
		)
		.ready_filter(|server_name| !self.services.globals.server_is_ours(server_name))
		.filter_map(|server_name| async move {
			self.services
				.state_cache
				.server_in_room(&server_name, room_id)
				.await
				.then_some(server_name)
		})
		.boxed();

	while let Some(ref backfill_server) = servers.next().await {
		info!("Asking {backfill_server} for event {event_id}");
		let value = self
			.services
			.sending
			.send_federation_request(backfill_server, federation::event::get_event::v1::Request {
				event_id: event_id.to_owned(),
				include_unredacted_content: Some(false),
			})
			.await
			.and_then(|response| {
				serde_json::from_str::<CanonicalJsonObject>(response.pdu.get()).map_err(|e| {
					err!(BadServerResponse(debug_warn!(
						"Error parsing incoming event {e:?} from {backfill_server}"
					)))
				})
			});

		let pdu = match value {
			| Ok(value) => {
				// Validate and persist the event before serving it back.
				self.services
					.event_handler
					.handle_incoming_pdu(backfill_server, room_id, event_id, value, false)
					.boxed()
					.await?;
				debug!("Successfully backfilled {event_id} from {backfill_server}");
				Some(self.get_pdu(event_id).await)
			},
			| Err(e) => {
				warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
				None
			},
		};

		if let Some(pdu) = pdu {
			debug!("Fetched {event_id} from {backfill_server}");
			return pdu;
		}
	}

	// Fix: format arguments were previously swapped (room printed where the
	// event belongs and vice versa).
	Err!("No servers could be used to fetch {event_id} in {room_id}.")
}
#[implement(super::Service)]
#[tracing::instrument(skip(self, pdu), level = "debug")]
pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box<RawJsonValue>) -> Result<()> {

View file

@@ -3,8 +3,7 @@ use std::{borrow::Borrow, sync::Arc};
use conduwuit::{
Err, PduCount, PduEvent, Result, at, err,
result::{LogErr, NotFound},
utils,
utils::stream::TryReadyExt,
utils::{self, stream::TryReadyExt},
};
use database::{Database, Deserialized, Json, KeyVal, Map};
use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut};