Merge remote-tracking branch 'ginger/single-event-backfill' into illegal-car-mods

commit efa758c29b
Jacob Taylor 2025-09-06 11:55:50 -07:00
12 changed files with 157 additions and 84 deletions

flake.lock (generated)

@@ -513,23 +513,6 @@
         "type": "github"
       }
     },
-    "rocksdb": {
-      "flake": false,
-      "locked": {
-        "lastModified": 1753385396,
-        "narHash": "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=",
-        "ref": "10.4.fb",
-        "rev": "28d4b7276c16ed3e28af1bd96162d6442ce25923",
-        "revCount": 13318,
-        "type": "git",
-        "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
-      },
-      "original": {
-        "ref": "10.4.fb",
-        "type": "git",
-        "url": "https://forgejo.ellis.link/continuwuation/rocksdb"
-      }
-    },
     "root": {
       "inputs": {
         "attic": "attic",
@@ -539,8 +522,7 @@
         "flake-compat": "flake-compat_3",
         "flake-utils": "flake-utils",
         "nix-filter": "nix-filter",
-        "nixpkgs": "nixpkgs_5",
-        "rocksdb": "rocksdb"
+        "nixpkgs": "nixpkgs_5"
       }
     },
     "rust-analyzer-src": {

flake.nix

@@ -16,10 +16,6 @@
     flake-utils.url = "github:numtide/flake-utils?ref=main";
     nix-filter.url = "github:numtide/nix-filter?ref=main";
     nixpkgs.url = "github:NixOS/nixpkgs?ref=nixpkgs-unstable";
-    rocksdb = {
-      url = "git+https://forgejo.ellis.link/continuwuation/rocksdb?ref=10.4.fb";
-      flake = false;
-    };
   };

   outputs =
@@ -65,7 +61,13 @@
           inherit (self) liburing;
         }).overrideAttrs
           (old: {
-            src = inputs.rocksdb;
+            src = pkgsHost.fetchFromGitea {
+              domain = "forgejo.ellis.link";
+              owner = "continuwuation";
+              repo = "rocksdb";
+              rev = "10.4.fb";
+              sha256 = "sha256-/Hvy1yTH/0D5aa7bc+/uqFugCQq4InTdwlRw88vA5IY=";
+            };
             version = "v10.4.fb";
             cmakeFlags =
               pkgs.lib.subtractLists [


@@ -281,15 +281,8 @@ pub(super) async fn get_remote_pdu(
             vec![(event_id, value, room_id)]
         };

-        info!("Attempting to handle event ID {event_id} as backfilled PDU");
-        self.services
-            .rooms
-            .timeline
-            .backfill_pdu(&server, response.pdu)
-            .await?;
-
         let text = serde_json::to_string_pretty(&json)?;
-        let msg = "Got PDU from specified server and handled as backfilled";
+        let msg = "Got PDU from specified server:";
         write!(self, "{msg}. Event body:\n```json\n{text}\n```")
     },
 }
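The admin get-remote-pdu command now only reports the fetched event instead of also handling it as a backfilled PDU. A self-contained sketch of the reply formatting it keeps, using only serde_json; format_pdu_reply is a hypothetical stand-in for the handler's write path, not a conduwuit API:

use serde_json::{Value, json};

// Pretty-print the PDU and wrap it in a fenced block so Matrix clients
// render the admin reply as code, mirroring the handler above.
fn format_pdu_reply(pdu: &Value) -> serde_json::Result<String> {
    let text = serde_json::to_string_pretty(pdu)?;
    let msg = "Got PDU from specified server:";
    Ok(format!("{msg} Event body:\n```json\n{text}\n```"))
}

fn main() {
    let reply = format_pdu_reply(&json!({ "type": "m.room.message" })).unwrap();
    assert!(reply.contains("```json"));
}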


@@ -57,5 +57,5 @@ pub(super) async fn pdus(
         .try_collect()
         .await?;

-    self.write_str(&format!("{result:#?}")).await
+    self.write_str(&format!("```\n{result:#?}\n```")).await
 }


@@ -35,7 +35,7 @@ use ruma::{
 };
 use tracing::warn;

-use super::utils::{count_to_token, parse_pagination_token as parse_token};
+use super::utils::{count_to_pagination_token, pagination_token_to_count as parse_token};
 use crate::Ruma;

 /// list of safe and common non-state events to ignore if the user is ignored
@@ -181,8 +181,8 @@ pub(crate) async fn get_message_events_route(
         .collect();

     Ok(get_message_events::v3::Response {
-        start: count_to_token(from),
-        end: next_token.map(count_to_token),
+        start: count_to_pagination_token(from),
+        end: next_token.map(count_to_pagination_token),
         chunk,
         state,
     })


@@ -18,7 +18,7 @@ use ruma::{
     events::{TimelineEventType, relation::RelationType},
 };

-use super::utils::{count_to_token, parse_pagination_token as parse_token};
+use super::utils::{count_to_pagination_token, pagination_token_to_count as parse_token};
 use crate::Ruma;

 /// # `GET /_matrix/client/r0/rooms/{roomId}/relations/{eventId}/{relType}/{eventType}`
@@ -193,7 +193,7 @@ async fn paginate_relations_with_filter(
             | Direction::Forward => events.last(),
             | Direction::Backward => events.first(),
         }
-        .map(|(count, _)| count_to_token(*count))
+        .map(|(count, _)| count_to_pagination_token(*count))
     } else {
         None
     };


@@ -18,7 +18,7 @@ pub(crate) async fn get_room_event_route(
     let event = services
         .rooms
         .timeline
-        .get_pdu(event_id)
+        .get_remote_pdu(room_id, event_id)
         .map_err(|_| err!(Request(NotFound("Event {} not found.", event_id))));

     let visible = services
@@ -33,11 +33,6 @@ pub(crate) async fn get_room_event_route(
         return Err!(Request(Forbidden("You don't have permission to view this event.")));
     }

-    debug_assert!(
-        event.event_id() == event_id && event.room_id() == room_id,
-        "Fetched PDU must match requested"
-    );
-
     event.add_age().ok();

     Ok(get_room_event::v3::Response { event: event.into_format() })
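get_room_event_route now calls get_remote_pdu, so an event that is missing locally triggers a federation fetch instead of an immediate NotFound. A runnable model of that local-then-remote fallback; fetch_local and fetch_from are hypothetical stand-ins for the database read and the federation /event request, not conduwuit APIs:

use std::collections::HashMap;

// Stand-in for the local database lookup.
fn fetch_local(db: &HashMap<String, String>, event_id: &str) -> Option<String> {
    db.get(event_id).cloned()
}

// Stand-in for a federation /event/{eventId} request to one server; this
// toy version never succeeds.
fn fetch_from(server: &str, event_id: &str) -> Result<String, String> {
    Err(format!("{server} has no copy of {event_id}"))
}

// A local hit short-circuits; otherwise the first remote that answers wins.
fn get_event(
    db: &HashMap<String, String>,
    servers: &[&str],
    event_id: &str,
) -> Result<String, String> {
    if let Some(local) = fetch_local(db, event_id) {
        return Ok(local);
    }
    for server in servers {
        match fetch_from(server, event_id) {
            Ok(event) => return Ok(event),
            Err(e) => eprintln!("{e}"),
        }
    }
    Err(format!("no server could provide {event_id}"))
}

fn main() {
    let db = HashMap::from([("$abc".to_owned(), "pdu body".to_owned())]);
    assert_eq!(get_event(&db, &["remote.example"], "$abc").unwrap(), "pdu body");
    assert!(get_event(&db, &["remote.example"], "$missing").is_err());
}

Under this flow the removed debug_assert is arguably redundant: after handle_incoming_pdu accepts the event, get_remote_pdu re-reads it from the local store under the requested event_id.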


@@ -9,7 +9,7 @@ use conduwuit::{
 use futures::StreamExt;
 use ruma::{api::client::threads::get_threads, uint};

-use crate::Ruma;
+use crate::{Ruma, client::utils::pagination_token_to_count};

 /// # `GET /_matrix/client/r0/rooms/{roomId}/threads`
 pub(crate) async fn get_threads_route(
@@ -27,7 +27,7 @@ pub(crate) async fn get_threads_route(
     let from: PduCount = body
         .from
         .as_deref()
-        .map(str::parse)
+        .map(pagination_token_to_count)
         .transpose()?
         .unwrap_or_else(PduCount::max);


@@ -1,28 +1,7 @@
-use conduwuit::{
-    Result, err,
-    matrix::pdu::{PduCount, ShortEventId},
-};
+use conduwuit::{Result, matrix::pdu::PduCount};

-/// Parse a pagination token, trying ShortEventId first, then falling back to
-/// PduCount
-pub(crate) fn parse_pagination_token(token: &str) -> Result<PduCount> {
-    // Try parsing as ShortEventId first
-    if let Ok(shorteventid) = token.parse::<ShortEventId>() {
-        // ShortEventId maps directly to a PduCount in our database
-        Ok(PduCount::Normal(shorteventid))
-    } else if let Ok(count) = token.parse::<u64>() {
-        // Fallback to PduCount for backwards compatibility
-        Ok(PduCount::Normal(count))
-    } else if let Ok(count) = token.parse::<i64>() {
-        // Also handle negative counts for backfilled events
-        Ok(PduCount::from_signed(count))
-    } else {
-        Err(err!(Request(InvalidParam("Invalid pagination token"))))
-    }
-}
+/// Parse a pagination token
+pub(crate) fn pagination_token_to_count(token: &str) -> Result<PduCount> { token.parse() }

-/// Convert a PduCount to a token string (using the underlying ShortEventId)
-pub(crate) fn count_to_token(count: PduCount) -> String {
-    // The PduCount's unsigned value IS the ShortEventId
-    count.into_unsigned().to_string()
-}
+/// Convert a PduCount to a token string
+pub(crate) fn count_to_pagination_token(count: PduCount) -> String { count.to_string() }
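Both helpers now delegate to PduCount's own FromStr and Display implementations instead of special-casing ShortEventId, u64, and i64 inputs. A runnable model of the resulting round-trip, with Count as a hypothetical stand-in for PduCount:

use std::{fmt, num::ParseIntError, str::FromStr};

// Stand-in for PduCount: a single integer whose FromStr/Display define the
// token format, so the helpers shrink to plain delegation.
struct Count(i64);

impl FromStr for Count {
    type Err = ParseIntError;
    fn from_str(s: &str) -> Result<Self, Self::Err> { s.parse().map(Count) }
}

impl fmt::Display for Count {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.0.fmt(f) }
}

fn pagination_token_to_count(token: &str) -> Result<Count, ParseIntError> { token.parse() }

fn count_to_pagination_token(count: Count) -> String { count.to_string() }

fn main() {
    let count = pagination_token_to_count("-42").expect("valid token");
    assert_eq!(count_to_pagination_token(count), "-42");
}

Keeping the format in one FromStr/Display pair means message.rs, relations.rs, and threads.rs can no longer drift apart in how they parse tokens.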


@@ -1,6 +1,10 @@
 #![allow(clippy::cast_possible_wrap, clippy::cast_sign_loss, clippy::as_conversions)]

-use std::{cmp::Ordering, fmt, fmt::Display, str::FromStr};
+use std::{
+    cmp::Ordering,
+    fmt::{self, Display},
+    str::FromStr,
+};

 use ruma::api::Direction;


@@ -1,21 +1,24 @@
 use std::iter::once;

+use conduwuit::{Err, PduEvent};
 use conduwuit_core::{
-    Result, debug, implement, info,
+    Result, debug, debug_warn, err, implement, info,
     matrix::{
         event::Event,
         pdu::{PduCount, PduId, RawPduId},
     },
     utils::{IterStream, ReadyExt},
-    validated,
+    validated, warn,
 };
 use futures::{FutureExt, StreamExt};
 use ruma::{
-    RoomId, ServerName,
+    CanonicalJsonObject, EventId, RoomId, ServerName,
     api::federation,
     events::{
         StateEventType, TimelineEventType, room::power_levels::RoomPowerLevelsEventContent,
-    }, UInt};
+    },
+    uint,
+};
 use serde_json::value::RawValue as RawJsonValue;

 use super::ExtractBody;
@@ -98,7 +101,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
         .boxed();

     while let Some(ref backfill_server) = servers.next().await {
-        info!("Asking {backfill_server} for backfill of room {room_id}");
+        info!("Asking {backfill_server} for backfill in {room_id}");
         let response = self
             .services
             .sending
@@ -107,7 +110,7 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
                 federation::backfill::get_backfill::v1::Request {
                     room_id: room_id.to_owned(),
                     v: vec![first_pdu.1.event_id().to_owned()],
-                    limit: UInt::from(self.services.server.config.max_fetch_prev_events),
+                    limit: uint!(100),
                 },
             )
             .await;
@@ -115,21 +118,137 @@ pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Re
             | Ok(response) => {
                 for pdu in response.pdus {
                     if let Err(e) = self.backfill_pdu(backfill_server, pdu).boxed().await {
-                        info!("Failed to add backfilled pdu in room {room_id}: {e}");
+                        debug_warn!("Failed to add backfilled pdu in room {room_id}: {e}");
                     }
                 }

                 return Ok(());
             },
             | Err(e) => {
-                info!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
+                warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
             },
         }
     }

-    info!("No servers could backfill, but backfill was needed in room {room_id}");
+    warn!("No servers could backfill, but backfill was needed in room {room_id}");
     Ok(())
 }
+#[implement(super::Service)]
+#[tracing::instrument(name = "get_remote_pdu", level = "debug", skip(self))]
+pub async fn get_remote_pdu(&self, room_id: &RoomId, event_id: &EventId) -> Result<PduEvent> {
+    let local = self.get_pdu(event_id).await;
+    if local.is_ok() {
+        // We already have this PDU, no need to backfill
+        debug!("We already have {event_id} in {room_id}, no need to backfill.");
+        return local;
+    }
+
+    debug!("Preparing to fetch event {event_id} in room {room_id} from remote servers.");
+
+    // Similar to backfill_if_required, but only for a single PDU
+    // Fetch a list of servers to try
+    if self
+        .services
+        .state_cache
+        .room_joined_count(room_id)
+        .await
+        .is_ok_and(|count| count <= 1)
+        && !self
+            .services
+            .state_accessor
+            .is_world_readable(room_id)
+            .await
+    {
+        // Room is empty (1 user or none), there is no one that can backfill
+        return Err!(Request(NotFound("No one can backfill this PDU, room is empty.")));
+    }
+
+    let power_levels: RoomPowerLevelsEventContent = self
+        .services
+        .state_accessor
+        .room_state_get_content(room_id, &StateEventType::RoomPowerLevels, "")
+        .await
+        .unwrap_or_default();
+
+    let room_mods = power_levels.users.iter().filter_map(|(user_id, level)| {
+        if level > &power_levels.users_default && !self.services.globals.user_is_local(user_id) {
+            Some(user_id.server_name())
+        } else {
+            None
+        }
+    });
+
+    let canonical_room_alias_server = once(
+        self.services
+            .state_accessor
+            .get_canonical_alias(room_id)
+            .await,
+    )
+    .filter_map(Result::ok)
+    .map(|alias| alias.server_name().to_owned())
+    .stream();
+
+    let mut servers = room_mods
+        .stream()
+        .map(ToOwned::to_owned)
+        .chain(canonical_room_alias_server)
+        .chain(
+            self.services
+                .server
+                .config
+                .trusted_servers
+                .iter()
+                .map(ToOwned::to_owned)
+                .stream(),
+        )
+        .ready_filter(|server_name| !self.services.globals.server_is_ours(server_name))
+        .filter_map(|server_name| async move {
+            self.services
+                .state_cache
+                .server_in_room(&server_name, room_id)
+                .await
+                .then_some(server_name)
+        })
+        .boxed();
+
+    while let Some(ref backfill_server) = servers.next().await {
+        info!("Asking {backfill_server} for event {}", event_id);
+        let value = self
+            .services
+            .sending
+            .send_federation_request(backfill_server, federation::event::get_event::v1::Request {
+                event_id: event_id.to_owned(),
+                include_unredacted_content: Some(false),
+            })
+            .await
+            .and_then(|response| {
+                serde_json::from_str::<CanonicalJsonObject>(response.pdu.get()).map_err(|e| {
+                    err!(BadServerResponse(debug_warn!(
+                        "Error parsing incoming event {e:?} from {backfill_server}"
+                    )))
+                })
+            });
+
+        let pdu = match value {
+            | Ok(value) => {
+                self.services
+                    .event_handler
+                    .handle_incoming_pdu(backfill_server, &room_id, &event_id, value, false)
+                    .boxed()
+                    .await?;
+                debug!("Successfully backfilled {event_id} from {backfill_server}");
+                Some(self.get_pdu(event_id).await)
+            },
+            | Err(e) => {
+                warn!("{backfill_server} failed to provide backfill for room {room_id}: {e}");
+                None
+            },
+        };
+
+        if let Some(pdu) = pdu {
+            debug!("Fetched {event_id} from {backfill_server}");
+            return pdu;
+        }
+    }
Err!("No servers could be used to fetch {} in {}.", room_id, event_id)
+}

 #[implement(super::Service)]
 #[tracing::instrument(skip(self, pdu), level = "debug")]
 pub async fn backfill_pdu(&self, origin: &ServerName, pdu: Box<RawJsonValue>) -> Result<()> {
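The new get_remote_pdu asks candidate servers in a deliberate order: servers of remote users with elevated power levels, then the server in the room's canonical alias, then configured trusted servers, always skipping our own name. A runnable sketch of that ordering using plain Vecs in place of the service's async streams; every server name here is illustrative:

// Build the candidate list in priority order and drop our own server name.
// The real code additionally keeps only servers that are actually in the room.
fn candidate_servers(
    mod_servers: Vec<String>,
    alias_server: Option<String>,
    trusted: Vec<String>,
    our_name: &str,
) -> Vec<String> {
    mod_servers
        .into_iter()
        .chain(alias_server)
        .chain(trusted)
        .filter(|name| name != our_name)
        .collect()
}

fn main() {
    let order = candidate_servers(
        vec!["mods.example".into()],
        Some("alias.example".into()),
        vec!["trusted.example".into(), "ours.example".into()],
        "ours.example",
    );
    assert_eq!(order, ["mods.example", "alias.example", "trusted.example"]);
}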


@@ -3,8 +3,7 @@ use std::{borrow::Borrow, sync::Arc};
 use conduwuit::{
     Err, PduCount, PduEvent, Result, at, err,
     result::{LogErr, NotFound},
-    utils,
-    utils::stream::TryReadyExt,
+    utils::{self, stream::TryReadyExt},
 };
 use database::{Database, Deserialized, Json, KeyVal, Map};
 use futures::{FutureExt, Stream, TryFutureExt, TryStreamExt, future::select_ok, pin_mut};